2024-11-22 04:33:23,454 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 04:33:23,465 main DEBUG Took 0.009072 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-22 04:33:23,465 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-22 04:33:23,465 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-22 04:33:23,466 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-22 04:33:23,467 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,474 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-22 04:33:23,486 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,487 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,487 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,488 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,488 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,489 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,490 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,490 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,491 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,491 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,492 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,492 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-22 04:33:23,492 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,493 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,493 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,494 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,494 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,495 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,495 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,495 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-22 04:33:23,496 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,496 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-22 04:33:23,498 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-22 04:33:23,499 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-22 04:33:23,501 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-22 04:33:23,501 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-22 04:33:23,502 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-22 04:33:23,503 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-22 04:33:23,510 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-22 04:33:23,512 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-22 04:33:23,514 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-22 04:33:23,514 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-22 04:33:23,514 main DEBUG createAppenders(={Console}) 2024-11-22 04:33:23,515 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-22 04:33:23,515 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-22 04:33:23,516 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-22 04:33:23,516 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-22 04:33:23,516 main DEBUG OutputStream closed 2024-11-22 04:33:23,517 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-22 04:33:23,517 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-22 04:33:23,517 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-22 04:33:23,590 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-22 04:33:23,592 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-22 04:33:23,593 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-22 04:33:23,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-22 04:33:23,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-22 04:33:23,594 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-22 04:33:23,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-22 04:33:23,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-22 04:33:23,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-22 04:33:23,595 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-22 04:33:23,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-22 04:33:23,596 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-22 04:33:23,596 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-22 04:33:23,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-22 04:33:23,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-22 04:33:23,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-22 04:33:23,597 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-22 04:33:23,598 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-22 04:33:23,600 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-22 04:33:23,600 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-logging/target/hbase-logging-3.0.0-beta-2-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-22 04:33:23,601 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-22 04:33:23,601 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-22T04:33:23,815 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4 2024-11-22 04:33:23,818 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-22 04:33:23,818 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-22T04:33:23,827 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-22T04:33:23,865 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=272, ProcessCount=11, AvailableMemoryMB=10127 2024-11-22T04:33:23,867 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:33:23,882 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0, deleteOnExit=true 2024-11-22T04:33:23,883 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:33:23,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/test.cache.data in system properties and HBase conf 2024-11-22T04:33:23,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:33:23,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:33:23,885 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:33:23,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:33:23,886 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:33:23,959 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-22T04:33:24,042 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T04:33:24,045 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:33:24,046 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:33:24,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:33:24,047 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:33:24,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:33:24,048 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:33:24,049 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:33:24,049 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:33:24,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:33:24,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:33:24,051 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:33:24,051 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:33:24,051 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:33:24,052 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:33:24,478 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:33:25,029 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T04:33:25,100 INFO [Time-limited test {}] log.Log(170): Logging initialized @2366ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-22T04:33:25,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:33:25,228 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:33:25,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:33:25,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:33:25,253 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:33:25,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:33:25,287 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:33:25,288 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:33:25,569 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/java.io.tmpdir/jetty-localhost-45097-hadoop-hdfs-3_4_1-tests_jar-_-any-16162112408032681812/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:33:25,578 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:45097} 2024-11-22T04:33:25,579 INFO [Time-limited test {}] server.Server(415): Started @2845ms 2024-11-22T04:33:25,616 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:33:26,263 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:33:26,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:33:26,283 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:33:26,284 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:33:26,284 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:33:26,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:33:26,286 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:33:26,420 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6c2fdbac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/java.io.tmpdir/jetty-localhost-44465-hadoop-hdfs-3_4_1-tests_jar-_-any-8898834426540842370/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:33:26,421 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:44465} 2024-11-22T04:33:26,422 INFO [Time-limited test {}] server.Server(415): Started @3688ms 2024-11-22T04:33:26,490 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:33:26,682 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:33:26,690 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:33:26,702 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:33:26,703 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:33:26,703 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:33:26,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:33:26,710 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:33:26,854 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1467625d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/java.io.tmpdir/jetty-localhost-37307-hadoop-hdfs-3_4_1-tests_jar-_-any-973539902523529882/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:33:26,855 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:37307} 2024-11-22T04:33:26,855 INFO [Time-limited test {}] server.Server(415): Started @4122ms 2024-11-22T04:33:26,862 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T04:33:28,637 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data2/current/BP-1604185574-172.17.0.2-1732250004553/current, will proceed with Du for space computation calculation, 2024-11-22T04:33:28,637 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data1/current/BP-1604185574-172.17.0.2-1732250004553/current, will proceed with Du for space computation calculation, 2024-11-22T04:33:28,664 WARN [Thread-104 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data3/current/BP-1604185574-172.17.0.2-1732250004553/current, will proceed with Du for space computation calculation, 2024-11-22T04:33:28,664 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data4/current/BP-1604185574-172.17.0.2-1732250004553/current, will proceed with Du for space computation calculation, 2024-11-22T04:33:28,676 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:33:28,687 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:33:28,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e5c933f0a893f13 with lease ID 0x8d4dd23c90eef5af: Processing first storage report for DS-85631f78-1949-4ee3-84ee-152a50332791 from datanode DatanodeRegistration(127.0.0.1:41265, datanodeUuid=e63b29ae-ef87-4fe2-b19f-6fd426574dd2, infoPort=42513, infoSecurePort=0, ipcPort=39535, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553) 2024-11-22T04:33:28,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5c933f0a893f13 with lease ID 0x8d4dd23c90eef5af: from storage DS-85631f78-1949-4ee3-84ee-152a50332791 node DatanodeRegistration(127.0.0.1:41265, datanodeUuid=e63b29ae-ef87-4fe2-b19f-6fd426574dd2, infoPort=42513, infoSecurePort=0, ipcPort=39535, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:33:28,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdaedd4cc4fe0d05 with lease ID 0x8d4dd23c90eef5b0: Processing first storage report for DS-7cc3963a-7a53-4333-a82a-8af26fd445fc from datanode DatanodeRegistration(127.0.0.1:35923, datanodeUuid=3ca1c472-8fa9-46f4-b133-01adb121d757, infoPort=44893, infoSecurePort=0, ipcPort=39609, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553) 2024-11-22T04:33:28,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbdaedd4cc4fe0d05 with lease ID 0x8d4dd23c90eef5b0: from storage DS-7cc3963a-7a53-4333-a82a-8af26fd445fc node DatanodeRegistration(127.0.0.1:35923, datanodeUuid=3ca1c472-8fa9-46f4-b133-01adb121d757, infoPort=44893, infoSecurePort=0, ipcPort=39609, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:33:28,722 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e5c933f0a893f13 with lease ID 0x8d4dd23c90eef5af: Processing first storage report for DS-7b8d9aa4-3a8c-4118-90d4-cadfe455eae9 from datanode DatanodeRegistration(127.0.0.1:41265, datanodeUuid=e63b29ae-ef87-4fe2-b19f-6fd426574dd2, infoPort=42513, infoSecurePort=0, ipcPort=39535, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553) 2024-11-22T04:33:28,722 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e5c933f0a893f13 with lease ID 0x8d4dd23c90eef5af: from storage DS-7b8d9aa4-3a8c-4118-90d4-cadfe455eae9 node DatanodeRegistration(127.0.0.1:41265, datanodeUuid=e63b29ae-ef87-4fe2-b19f-6fd426574dd2, infoPort=42513, infoSecurePort=0, ipcPort=39535, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:33:28,722 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbdaedd4cc4fe0d05 with lease ID 0x8d4dd23c90eef5b0: Processing first storage report for DS-2cd947d7-d40f-447f-a43b-53075c601ecd from datanode DatanodeRegistration(127.0.0.1:35923, datanodeUuid=3ca1c472-8fa9-46f4-b133-01adb121d757, infoPort=44893, infoSecurePort=0, ipcPort=39609, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553) 2024-11-22T04:33:28,722 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xbdaedd4cc4fe0d05 with lease ID 0x8d4dd23c90eef5b0: from storage DS-2cd947d7-d40f-447f-a43b-53075c601ecd node DatanodeRegistration(127.0.0.1:35923, datanodeUuid=3ca1c472-8fa9-46f4-b133-01adb121d757, infoPort=44893, infoSecurePort=0, ipcPort=39609, storageInfo=lv=-57;cid=testClusterID;nsid=1536031019;c=1732250004553), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:33:28,792 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4 2024-11-22T04:33:28,873 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/zookeeper_0, clientPort=51545, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:33:28,882 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51545 2024-11-22T04:33:28,891 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:28,893 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:29,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:33:29,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:33:29,494 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4 with version=8 2024-11-22T04:33:29,494 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:33:29,568 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-22T04:33:29,808 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:33:29,817 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:29,817 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:29,821 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:33:29,821 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:29,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:33:29,948 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:33:30,002 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-22T04:33:30,011 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-22T04:33:30,014 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:33:30,035 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 97181 (auto-detected) 2024-11-22T04:33:30,036 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-22T04:33:30,052 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37977 2024-11-22T04:33:30,075 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:37977 connecting to ZooKeeper ensemble=127.0.0.1:51545 2024-11-22T04:33:30,194 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:379770x0, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:33:30,199 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:37977-0x10160d178c10000 connected 2024-11-22T04:33:30,304 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:30,308 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:30,322 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:33:30,326 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4, hbase.cluster.distributed=false 2024-11-22T04:33:30,350 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:33:30,354 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37977 2024-11-22T04:33:30,355 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37977 2024-11-22T04:33:30,356 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37977 2024-11-22T04:33:30,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37977 2024-11-22T04:33:30,360 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37977 2024-11-22T04:33:30,457 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:33:30,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:30,459 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:30,459 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:33:30,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:33:30,460 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:33:30,462 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:33:30,465 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:33:30,466 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33755 2024-11-22T04:33:30,468 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33755 connecting to ZooKeeper ensemble=127.0.0.1:51545 2024-11-22T04:33:30,469 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:30,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:30,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:337550x0, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:33:30,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:337550x0, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:33:30,492 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33755-0x10160d178c10001 connected 2024-11-22T04:33:30,496 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:33:30,503 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:33:30,505 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:33:30,511 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:33:30,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33755 2024-11-22T04:33:30,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33755 2024-11-22T04:33:30,512 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33755 2024-11-22T04:33:30,513 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33755 2024-11-22T04:33:30,514 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33755 2024-11-22T04:33:30,527 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:37977 2024-11-22T04:33:30,528 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:30,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:33:30,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:33:30,545 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:30,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:33:30,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:30,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-22T04:33:30,576 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:33:30,577 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,37977,1732250009659 from backup master directory 2024-11-22T04:33:30,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:30,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:33:30,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:33:30,586 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:33:30,586 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:30,589 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-22T04:33:30,591 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-22T04:33:30,644 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase.id] with ID: 2ae8c2e9-f304-42df-a292-935be714c721 2024-11-22T04:33:30,644 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/.tmp/hbase.id 2024-11-22T04:33:30,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:33:30,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:33:30,658 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/.tmp/hbase.id]:[hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase.id] 2024-11-22T04:33:30,701 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:30,706 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:33:30,724 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 15ms. 2024-11-22T04:33:30,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:30,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:30,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:33:30,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:33:30,774 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:33:30,776 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:33:30,781 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:33:30,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:33:30,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:33:30,832 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY 
=> ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store 2024-11-22T04:33:30,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:33:30,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:33:30,856 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-22T04:33:30,860 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:30,861 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:33:30,861 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:33:30,861 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:33:30,862 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:33:30,862 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:33:30,863 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:33:30,863 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250010861Disabling compacts and flushes for region at 1732250010861Disabling writes for close at 1732250010862 (+1 ms)Writing region close event to WAL at 1732250010862Closed at 1732250010862 2024-11-22T04:33:30,865 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/.initializing 2024-11-22T04:33:30,866 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/WALs/8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:30,886 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37977%2C1732250009659, suffix=, logDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/WALs/8fc3ff0a63e6,37977,1732250009659, archiveDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/oldWALs, maxLogs=10 2024-11-22T04:33:30,894 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37977%2C1732250009659.1732250010890 2024-11-22T04:33:30,912 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/WALs/8fc3ff0a63e6,37977,1732250009659/8fc3ff0a63e6%2C37977%2C1732250009659.1732250010890 2024-11-22T04:33:30,921 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:33:30,923 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:33:30,923 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:30,926 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:30,927 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:30,960 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:30,983 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:33:30,987 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:30,989 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:30,990 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:30,993 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:33:30,994 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:30,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:33:30,995 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:30,999 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:33:30,999 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:33:31,000 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,003 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:33:31,003 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,005 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:33:31,005 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,009 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,011 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,017 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,018 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,022 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:33:31,025 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:33:31,029 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:33:31,030 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=876580, jitterRate=0.11462953686714172}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:33:31,037 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250010939Initializing all the Stores at 1732250010941 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250010941Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250010942 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250010942Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250010942Cleaning up temporary data from old regions at 1732250011018 (+76 ms)Region opened successfully at 1732250011037 (+19 ms) 2024-11-22T04:33:31,038 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:33:31,067 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@eb1dcfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:33:31,094 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:33:31,106 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:33:31,106 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:33:31,109 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:33:31,110 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-22T04:33:31,114 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-22T04:33:31,114 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:33:31,139 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:33:31,147 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:33:31,228 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:33:31,233 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:33:31,235 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:33:31,248 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:33:31,251 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:33:31,256 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:33:31,265 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:33:31,267 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:33:31,280 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:33:31,298 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:33:31,311 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:33:31,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:33:31,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:33:31,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,322 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,326 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,37977,1732250009659, sessionid=0x10160d178c10000, setting cluster-up flag (Was=false) 2024-11-22T04:33:31,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,360 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,396 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:33:31,399 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:31,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,423 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:31,459 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:33:31,464 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:31,471 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:33:31,518 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(746): ClusterId : 2ae8c2e9-f304-42df-a292-935be714c721 2024-11-22T04:33:31,520 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:33:31,535 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:33:31,534 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:33:31,535 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:33:31,542 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:33:31,544 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:33:31,545 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35402214, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:33:31,549 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-22T04:33:31,553 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,37977,1732250009659 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:33:31,560 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:33755 2024-11-22T04:33:31,560 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:33:31,560 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:33:31,560 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:33:31,560 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:33:31,561 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:33:31,561 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,561 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:33:31,561 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,562 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250041562 2024-11-22T04:33:31,563 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:33:31,563 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:33:31,563 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:33:31,564 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T04:33:31,564 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:33:31,566 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:33:31,566 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,37977,1732250009659 with port=33755, startcode=1732250010424 2024-11-22T04:33:31,567 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:33:31,568 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:33:31,568 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:33:31,568 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:33:31,568 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:33:31,569 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,572 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:33:31,573 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:33:31,573 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:33:31,573 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,573 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:33:31,575 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:33:31,575 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:33:31,577 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250011576,5,FailOnTimeoutGroup] 2024-11-22T04:33:31,579 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:33:31,580 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250011577,5,FailOnTimeoutGroup] 2024-11-22T04:33:31,580 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,580 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:33:31,581 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,582 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-22T04:33:31,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:33:31,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:33:31,594 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:33:31,594 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4 2024-11-22T04:33:31,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:33:31,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:33:31,606 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:31,610 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:33:31,613 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:33:31,613 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:31,614 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:33:31,617 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:33:31,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:31,620 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:33:31,623 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:33:31,623 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,624 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:31,625 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:33:31,627 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:33:31,627 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:31,628 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:31,628 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:33:31,630 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740 2024-11-22T04:33:31,631 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740 2024-11-22T04:33:31,634 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:33:31,635 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:33:31,636 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:33:31,638 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:33:31,641 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:33:31,642 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=746105, jitterRate=-0.05127967894077301}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:33:31,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250011607Initializing all the Stores at 1732250011609 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250011609Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250011609Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250011609Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250011610 (+1 ms)Cleaning up temporary data from old regions at 1732250011635 (+25 ms)Region opened successfully at 1732250011646 (+11 ms) 2024-11-22T04:33:31,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:33:31,646 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:33:31,646 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:33:31,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:33:31,647 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:33:31,648 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:33:31,648 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250011646Disabling compacts and flushes for region at 1732250011646Disabling writes for close at 1732250011647 (+1 
ms)Writing region close event to WAL at 1732250011647Closed at 1732250011648 (+1 ms) 2024-11-22T04:33:31,651 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51449, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:33:31,652 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:33:31,652 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:33:31,657 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37977 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,659 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37977 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,659 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:33:31,667 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:33:31,670 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:33:31,672 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4 2024-11-22T04:33:31,672 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37209 2024-11-22T04:33:31,672 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:33:31,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:33:31,687 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] zookeeper.ZKUtil(111): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,687 WARN [RS:0;8fc3ff0a63e6:33755 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T04:33:31,687 INFO [RS:0;8fc3ff0a63e6:33755 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:33:31,687 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,690 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,33755,1732250010424] 2024-11-22T04:33:31,717 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:33:31,731 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:33:31,735 INFO [RS:0;8fc3ff0a63e6:33755 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:33:31,735 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,736 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:33:31,741 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:33:31,743 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,743 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,743 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,743 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,743 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,743 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:33:31,744 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:33:31,745 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:33:31,745 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,746 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,746 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,746 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,746 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,746 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,33755,1732250010424-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:33:31,762 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:33:31,764 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,33755,1732250010424-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,764 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:31,765 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.Replication(171): 8fc3ff0a63e6,33755,1732250010424 started 2024-11-22T04:33:31,780 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:33:31,780 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,33755,1732250010424, RpcServer on 8fc3ff0a63e6/172.17.0.2:33755, sessionid=0x10160d178c10001 2024-11-22T04:33:31,781 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:33:31,781 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,781 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,33755,1732250010424' 2024-11-22T04:33:31,781 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:33:31,782 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:33:31,783 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:33:31,783 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:33:31,783 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:31,783 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,33755,1732250010424' 2024-11-22T04:33:31,783 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:33:31,784 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:33:31,784 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:33:31,784 INFO [RS:0;8fc3ff0a63e6:33755 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:33:31,785 INFO [RS:0;8fc3ff0a63e6:33755 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:33:31,821 WARN [8fc3ff0a63e6:37977 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-22T04:33:31,897 INFO [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C33755%2C1732250010424, suffix=, logDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424, archiveDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs, maxLogs=32 2024-11-22T04:33:31,900 INFO [RS:0;8fc3ff0a63e6:33755 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 2024-11-22T04:33:31,909 INFO [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 2024-11-22T04:33:31,910 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:33:32,074 DEBUG [8fc3ff0a63e6:37977 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:33:32,087 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:32,092 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,33755,1732250010424, state=OPENING 2024-11-22T04:33:32,143 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:33:32,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:32,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:33:32,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:33:32,155 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:33:32,157 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:33:32,159 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,33755,1732250010424}] 2024-11-22T04:33:32,338 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:33:32,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57601, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:33:32,353 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:33:32,353 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:33:32,357 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C33755%2C1732250010424.meta, suffix=.meta, logDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424, archiveDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs, maxLogs=32 2024-11-22T04:33:32,359 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.meta.1732250012359.meta 2024-11-22T04:33:32,370 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.meta.1732250012359.meta 2024-11-22T04:33:32,373 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:33:32,374 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:33:32,376 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:33:32,378 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:33:32,383 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T04:33:32,388 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:33:32,389 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:32,389 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:33:32,389 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:33:32,392 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:33:32,394 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:33:32,394 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:32,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:32,396 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:33:32,398 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:33:32,398 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:32,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:32,399 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:33:32,405 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:33:32,406 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:32,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:33:32,407 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:33:32,409 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:33:32,409 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:32,410 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-22T04:33:32,411 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:33:32,412 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740 2024-11-22T04:33:32,416 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740 2024-11-22T04:33:32,419 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:33:32,419 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:33:32,420 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T04:33:32,424 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:33:32,426 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=774537, jitterRate=-0.015125826001167297}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:33:32,427 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:33:32,428 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250012390Writing region info on filesystem at 1732250012390Initializing all the Stores at 1732250012392 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250012392Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250012392Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250012392Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250012392Cleaning up temporary data from old regions at 1732250012419 (+27 ms)Running coprocessor post-open hooks at 1732250012427 (+8 ms)Region opened successfully at 1732250012428 (+1 ms) 2024-11-22T04:33:32,436 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250012326 2024-11-22T04:33:32,448 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:33:32,449 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:33:32,451 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:32,454 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,33755,1732250010424, state=OPEN 2024-11-22T04:33:32,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:33:32,491 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:33:32,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:33:32,491 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:33:32,492 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:32,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:33:32,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,33755,1732250010424 in 333 msec 2024-11-22T04:33:32,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:33:32,509 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 844 msec 2024-11-22T04:33:32,511 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:33:32,511 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:33:32,535 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:33:32,537 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,33755,1732250010424, seqNum=-1] 2024-11-22T04:33:32,563 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:33:32,565 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46915, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:33:32,586 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0860 sec 2024-11-22T04:33:32,586 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250012586, completionTime=-1 2024-11-22T04:33:32,589 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:33:32,589 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:33:32,614 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:33:32,615 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250072614 2024-11-22T04:33:32,615 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250132615 2024-11-22T04:33:32,615 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 25 msec 2024-11-22T04:33:32,618 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:32,618 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:32,618 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:32,620 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:37977, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:33:32,620 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:32,621 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:33:32,628 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:33:32,648 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.061sec 2024-11-22T04:33:32,649 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:33:32,650 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:33:32,651 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:33:32,651 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T04:33:32,651 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:33:32,652 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:33:32,653 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:33:32,662 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:33:32,663 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:33:32,663 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37977,1732250009659-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:33:32,731 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4c233f9d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:33:32,734 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-22T04:33:32,734 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-22T04:33:32,738 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,37977,-1 for getting cluster id 2024-11-22T04:33:32,741 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:33:32,748 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2ae8c2e9-f304-42df-a292-935be714c721' 2024-11-22T04:33:32,751 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:33:32,752 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2ae8c2e9-f304-42df-a292-935be714c721" 2024-11-22T04:33:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24713418, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:33:32,754 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,37977,-1] 2024-11-22T04:33:32,756 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:33:32,758 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:33:32,759 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:33:32,762 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@783fe34b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:33:32,763 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:33:32,769 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,33755,1732250010424, seqNum=-1] 2024-11-22T04:33:32,770 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:33:32,773 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51332, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:33:32,809 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:32,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:33:32,816 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:33:32,820 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T04:33:32,828 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:33:32,831 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5fd5bef9 2024-11-22T04:33:32,833 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T04:33:32,836 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33844, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T04:33:32,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T04:33:32,838 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-22T04:33:32,842 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:33:32,850 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-22T04:33:32,853 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T04:33:32,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-22T04:33:32,856 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:32,858 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T04:33:32,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:33:32,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741835_1011 (size=389) 2024-11-22T04:33:32,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741835_1011 (size=389) 2024-11-22T04:33:32,895 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 849eaa6fb87d20548eb5a1f32af954ff, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4 2024-11-22T04:33:32,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741836_1012 (size=72) 2024-11-22T04:33:32,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741836_1012 (size=72) 2024-11-22T04:33:32,906 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:32,906 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 849eaa6fb87d20548eb5a1f32af954ff, disabling compactions & flushes 2024-11-22T04:33:32,906 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:32,906 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:32,906 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. after waiting 0 ms 2024-11-22T04:33:32,906 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:32,906 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:32,906 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 849eaa6fb87d20548eb5a1f32af954ff: Waiting for close lock at 1732250012906Disabling compacts and flushes for region at 1732250012906Disabling writes for close at 1732250012906Writing region close event to WAL at 1732250012906Closed at 1732250012906 2024-11-22T04:33:32,908 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T04:33:32,912 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732250012908"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250012908"}]},"ts":"1732250012908"} 2024-11-22T04:33:32,917 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T04:33:32,918 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T04:33:32,920 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250012918"}]},"ts":"1732250012918"} 2024-11-22T04:33:32,925 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-22T04:33:32,927 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff, ASSIGN}] 2024-11-22T04:33:32,930 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff, ASSIGN 2024-11-22T04:33:32,933 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff, ASSIGN; state=OFFLINE, location=8fc3ff0a63e6,33755,1732250010424; forceNewPlan=false, retain=false 2024-11-22T04:33:33,084 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=849eaa6fb87d20548eb5a1f32af954ff, regionState=OPENING, regionLocation=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:33,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff, ASSIGN because future has completed 2024-11-22T04:33:33,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 849eaa6fb87d20548eb5a1f32af954ff, server=8fc3ff0a63e6,33755,1732250010424}] 2024-11-22T04:33:33,251 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 
2024-11-22T04:33:33,251 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 849eaa6fb87d20548eb5a1f32af954ff, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:33:33,251 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,251 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:33:33,251 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,252 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,254 INFO [StoreOpener-849eaa6fb87d20548eb5a1f32af954ff-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,256 INFO [StoreOpener-849eaa6fb87d20548eb5a1f32af954ff-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 849eaa6fb87d20548eb5a1f32af954ff columnFamilyName info 2024-11-22T04:33:33,256 DEBUG [StoreOpener-849eaa6fb87d20548eb5a1f32af954ff-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:33:33,257 INFO [StoreOpener-849eaa6fb87d20548eb5a1f32af954ff-1 {}] regionserver.HStore(327): Store=849eaa6fb87d20548eb5a1f32af954ff/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:33:33,257 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,259 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,259 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,260 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,260 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,262 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,266 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:33:33,267 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 849eaa6fb87d20548eb5a1f32af954ff; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=753243, jitterRate=-0.04220260679721832}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:33:33,268 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:33,269 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 849eaa6fb87d20548eb5a1f32af954ff: Running coprocessor pre-open hook at 1732250013252Writing region info on filesystem at 1732250013252Initializing all the Stores at 1732250013253 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250013253Cleaning up temporary data from old regions at 1732250013260 (+7 ms)Running coprocessor post-open hooks at 1732250013268 (+8 ms)Region opened successfully at 1732250013268 2024-11-22T04:33:33,271 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff., pid=6, masterSystemTime=1732250013244 2024-11-22T04:33:33,275 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:33,276 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:33:33,277 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=849eaa6fb87d20548eb5a1f32af954ff, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:33:33,281 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 849eaa6fb87d20548eb5a1f32af954ff, server=8fc3ff0a63e6,33755,1732250010424 because future has completed 2024-11-22T04:33:33,284 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=37977 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=8fc3ff0a63e6,33755,1732250010424, table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-22T04:33:33,292 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T04:33:33,294 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 849eaa6fb87d20548eb5a1f32af954ff, server=8fc3ff0a63e6,33755,1732250010424 in 196 msec 2024-11-22T04:33:33,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T04:33:33,298 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=849eaa6fb87d20548eb5a1f32af954ff, ASSIGN in 365 msec 2024-11-22T04:33:33,301 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T04:33:33,301 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250013301"}]},"ts":"1732250013301"} 2024-11-22T04:33:33,307 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-22T04:33:33,310 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T04:33:33,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 465 msec 2024-11-22T04:33:37,901 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-22T04:33:37,960 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T04:33:37,961 DEBUG 
[HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-22T04:33:39,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:33:39,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T04:33:40,004 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T04:33:40,004 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T04:33:40,007 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:33:40,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T04:33:40,008 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T04:33:40,008 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T04:33:42,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37977 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:33:42,883 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-22T04:33:42,889 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-22T04:33:42,896 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-22T04:33:42,897 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 
2024-11-22T04:33:42,897 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250022897 2024-11-22T04:33:43,082 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:33:43,082 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:33:43,082 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:33:43,082 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:33:43,083 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:33:43,083 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250022897 2024-11-22T04:33:43,084 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:33:43,084 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 is not closed yet, will try archiving it next time 2024-11-22T04:33:43,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741833_1009 (size=451) 2024-11-22T04:33:43,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741833_1009 (size=451) 2024-11-22T04:33:43,088 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250011900 2024-11-22T04:33:43,094 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff., hostname=8fc3ff0a63e6,33755,1732250010424, seqNum=2] 2024-11-22T04:33:55,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33755 {}] regionserver.HRegion(8855): Flush requested on 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:33:55,149 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 849eaa6fb87d20548eb5a1f32af954ff 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:33:55,203 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/4724934b5f19471cb1873a522165a04a is 1080, key is row0001/info:/1732250023096/Put/seqid=0 2024-11-22T04:33:55,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741838_1014 (size=12509) 2024-11-22T04:33:55,214 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741838_1014 (size=12509) 2024-11-22T04:33:55,215 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/4724934b5f19471cb1873a522165a04a 2024-11-22T04:33:55,256 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/4724934b5f19471cb1873a522165a04a as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a 2024-11-22T04:33:55,266 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T04:33:55,274 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 127ms, sequenceid=11, compaction requested=false 2024-11-22T04:33:55,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 849eaa6fb87d20548eb5a1f32af954ff: 2024-11-22T04:33:58,789 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
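
The flush just logged persisted seven cells with keys like row0001/info:/... into a new HFile. A rough sketch of the kind of writes that produce those cells, assuming the table's single column family is named "info" with an empty qualifier (as the logged keys suggest) and ~1 KB values; the class name WriteTestRows is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteTestRows {
      public static void main(String[] args) throws Exception {
        byte[] family = Bytes.toBytes("info");
        byte[] value = new byte[1024];  // roughly matches the ~1080-byte cells reported by HFileWriterImpl above
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))) {
          for (int i = 1; i <= 7; i++) {  // seven rows, like the seven entries flushed above
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(family, Bytes.toBytes(""), value);  // empty qualifier, as in keys like row0001/info:/...
            table.put(put);
          }
        }
      }
    }
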
2024-11-22T04:34:03,159 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 2024-11-22T04:34:03,366 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:03,367 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:03,367 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:03,367 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:03,367 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:03,367 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:03,367 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250022897 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 2024-11-22T04:34:03,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741837_1013 (size=12399) 2024-11-22T04:34:03,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741837_1013 (size=12399) 2024-11-22T04:34:03,376 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42513:42513),(127.0.0.1/127.0.0.1:44893:44893)] 2024-11-22T04:34:03,580 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:05,786 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:07,990 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:10,194 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:10,194 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33755 {}] 
regionserver.HRegion(8855): Flush requested on 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:34:10,195 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 849eaa6fb87d20548eb5a1f32af954ff 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:34:10,397 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:10,403 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/7b076d9eb1a3421aa5413f8d87828f8d is 1080, key is row0008/info:/1732250037146/Put/seqid=0 2024-11-22T04:34:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741840_1016 (size=12509) 2024-11-22T04:34:10,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741840_1016 (size=12509) 2024-11-22T04:34:10,825 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/7b076d9eb1a3421aa5413f8d87828f8d 2024-11-22T04:34:10,839 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/7b076d9eb1a3421aa5413f8d87828f8d as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d 2024-11-22T04:34:10,851 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d, entries=7, sequenceid=21, filesize=12.2 K 2024-11-22T04:34:11,054 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:11,054 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 859ms, sequenceid=21, compaction requested=false 2024-11-22T04:34:11,054 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 849eaa6fb87d20548eb5a1f32af954ff: 2024-11-22T04:34:11,055 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-22T04:34:11,055 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:11,056 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a because midkey is the same as first or last row 2024-11-22T04:34:12,399 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:12,676 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T04:34:12,676 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T04:34:14,602 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:14,604 WARN [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:14,605 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424:(num 1732250043158) roll requested 2024-11-22T04:34:14,606 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 2024-11-22T04:34:14,813 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:14,813 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:14,814 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:14,814 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:14,814 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:14,814 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:14,814 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 with entries=8, filesize=7.55 KB; new WAL 
/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 2024-11-22T04:34:14,815 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:34:14,815 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 is not closed yet, will try archiving it next time 2024-11-22T04:34:14,816 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250022897 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250022897 2024-11-22T04:34:14,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741839_1015 (size=7739) 2024-11-22T04:34:14,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741839_1015 (size=7739) 2024-11-22T04:34:16,807 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:18,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 849eaa6fb87d20548eb5a1f32af954ff, had cached 0 bytes from a total of 25018 2024-11-22T04:34:19,010 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:21,214 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:23,419 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:25,421 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T04:34:25,421 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 
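
The WAL rolls above are triggered internally once sync latency crosses the slow-sync thresholds, but the same kind of roll can also be requested explicitly from a client. A minimal sketch using the Admin API, with the region server identity taken from these log lines; the class name RollWalExample is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalExample {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Identity of the region server whose WAL keeps rolling in the log above.
          ServerName rs = ServerName.valueOf("8fc3ff0a63e6", 33755, 1732250010424L);
          admin.rollWALWriter(rs);  // the server closes the current WAL file and opens a new one
        }
      }
    }
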
2024-11-22T04:34:28,789 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T04:34:30,502 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5078 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:30,505 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5078 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:30,505 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424:(num 1732250065421) roll requested 2024-11-22T04:34:30,506 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:30,506 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:30,506 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:30,507 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:30,507 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:30,507 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 2024-11-22T04:34:30,508 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42513:42513),(127.0.0.1/127.0.0.1:44893:44893)] 2024-11-22T04:34:30,508 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 is not closed yet, will try archiving it next time 2024-11-22T04:34:30,509 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 2024-11-22T04:34:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741841_1017 (size=4753) 2024-11-22T04:34:30,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741841_1017 (size=4753) 2024-11-22T04:34:35,512 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:35,513 WARN [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1374): Requesting 
log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:35,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33755 {}] regionserver.HRegion(8855): Flush requested on 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:34:35,513 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 849eaa6fb87d20548eb5a1f32af954ff 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:34:35,572 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5060 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:35,572 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5060 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:37,514 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T04:34:40,527 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5012 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:40,527 WARN [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5012 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK], DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK]] 2024-11-22T04:34:40,527 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:40,527 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:40,527 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:40,527 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:40,528 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:40,528 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 with entries=2, filesize=1.52 KB; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 2024-11-22T04:34:40,530 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:34:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to 
blk_1073741842_1018 (size=1569) 2024-11-22T04:34:40,530 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 is not closed yet, will try archiving it next time 2024-11-22T04:34:40,530 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424:(num 1732250070509) roll requested 2024-11-22T04:34:40,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741842_1018 (size=1569) 2024-11-22T04:34:40,531 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 2024-11-22T04:34:40,533 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/dadf22b0cb794a68a1f958a2a8cbc93e is 1080, key is row0015/info:/1732250052197/Put/seqid=0 2024-11-22T04:34:40,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741844_1020 (size=12509) 2024-11-22T04:34:40,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741844_1020 (size=12509) 2024-11-22T04:34:40,543 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/dadf22b0cb794a68a1f958a2a8cbc93e 2024-11-22T04:34:40,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/dadf22b0cb794a68a1f958a2a8cbc93e as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e 2024-11-22T04:34:40,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e, entries=7, sequenceid=31, filesize=12.2 K 2024-11-22T04:34:45,542 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:45,542 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], 
DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:45,564 INFO [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:45,564 WARN [FSHLog-0-hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4-prefix:8fc3ff0a63e6,33755,1732250010424 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35923,DS-7cc3963a-7a53-4333-a82a-8af26fd445fc,DISK], DatanodeInfoWithStorage[127.0.0.1:41265,DS-85631f78-1949-4ee3-84ee-152a50332791,DISK]] 2024-11-22T04:34:45,564 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 10051ms, sequenceid=31, compaction requested=true 2024-11-22T04:34:45,564 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,565 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 849eaa6fb87d20548eb5a1f32af954ff: 2024-11-22T04:34:45,565 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,565 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,565 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-22T04:34:45,565 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:45,565 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,565 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a because midkey is the same as first or last row 2024-11-22T04:34:45,565 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 2024-11-22T04:34:45,566 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42513:42513),(127.0.0.1/127.0.0.1:44893:44893)] 2024-11-22T04:34:45,566 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,566 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250043158 2024-11-22T04:34:45,566 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424:(num 1732250085566) roll requested 2024-11-22T04:34:45,567 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250085566 2024-11-22T04:34:45,567 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 849eaa6fb87d20548eb5a1f32af954ff:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:34:45,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741843_1019 (size=438) 2024-11-22T04:34:45,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741843_1019 (size=438) 2024-11-22T04:34:45,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:34:45,570 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:34:45,570 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250054606 2024-11-22T04:34:45,572 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250065421 2024-11-22T04:34:45,573 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:34:45,575 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HStore(1541): 849eaa6fb87d20548eb5a1f32af954ff/info is initiating minor compaction (all files) 2024-11-22T04:34:45,575 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,575 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,576 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,576 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,576 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,576 INFO [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 849eaa6fb87d20548eb5a1f32af954ff/info in TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 
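
The minor compaction starting here was selected automatically after the flush (3 eligible 12.2 K store files), but a compaction can also be requested from a client. A minimal Admin API sketch, with the table name taken from the log; the class name RequestCompaction is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RequestCompaction {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.compact(table);       // queue a minor compaction, like the 3-file one selected above
          // admin.majorCompact(table);  // or request a major compaction of all store files
        }
      }
    }
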
2024-11-22T04:34:45,576 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250085566 2024-11-22T04:34:45,576 INFO [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e] into tmpdir=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp, totalSize=36.6 K 2024-11-22T04:34:45,577 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44893:44893),(127.0.0.1/127.0.0.1:42513:42513)] 2024-11-22T04:34:45,577 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,577 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,577 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C33755%2C1732250010424.1732250085577 2024-11-22T04:34:45,578 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4724934b5f19471cb1873a522165a04a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732250023096 2024-11-22T04:34:45,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741845_1021 (size=93) 2024-11-22T04:34:45,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741845_1021 (size=93) 2024-11-22T04:34:45,581 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b076d9eb1a3421aa5413f8d87828f8d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732250037146 2024-11-22T04:34:45,581 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,581 INFO 
[WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250080530 2024-11-22T04:34:45,582 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] compactions.Compactor(225): Compacting dadf22b0cb794a68a1f958a2a8cbc93e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732250052197 2024-11-22T04:34:45,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,592 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,592 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,593 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,593 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:34:45,593 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250085566 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250085577 2024-11-22T04:34:45,594 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42513:42513),(127.0.0.1/127.0.0.1:44893:44893)] 2024-11-22T04:34:45,594 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,595 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250085566 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741846_1022 (size=1258) 2024-11-22T04:34:45,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741846_1022 (size=1258) 2024-11-22T04:34:45,597 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 is not closed yet, will try archiving it next time 2024-11-22T04:34:45,617 INFO [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 849eaa6fb87d20548eb5a1f32af954ff#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:34:45,618 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/d70d0b8732f04b82b129932bce89b6be is 1080, key is row0001/info:/1732250023096/Put/seqid=0 2024-11-22T04:34:45,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741848_1024 (size=27710) 2024-11-22T04:34:45,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741848_1024 (size=27710) 2024-11-22T04:34:45,972 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/WALs/8fc3ff0a63e6,33755,1732250010424/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs/8fc3ff0a63e6%2C33755%2C1732250010424.1732250070509 2024-11-22T04:34:46,043 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/d70d0b8732f04b82b129932bce89b6be as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/d70d0b8732f04b82b129932bce89b6be 2024-11-22T04:34:46,059 INFO [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 849eaa6fb87d20548eb5a1f32af954ff/info of 849eaa6fb87d20548eb5a1f32af954ff into d70d0b8732f04b82b129932bce89b6be(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
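
The compaction leaves a single 27.1 K store file, and the split-policy DEBUG lines before and after this point compare that store size against a 16.0 K check size. A simplified illustration of that comparison only, not HBase's actual ConstantSizeRegionSplitPolicy implementation; numbers are the ones logged here:

    public class SplitSizeCheckSketch {
      // Simplified illustration: ask to split when the summed store size exceeds the check size.
      static boolean sizeSaysSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        return sumStoreSizeBytes > sizeToCheckBytes;
      }

      public static void main(String[] args) {
        long sumSize = 27710;      // ~27.1 K, the compacted store file size logged here
        long sizeToCheck = 16384;  // the 16.0 K check size logged by the split policy
        System.out.println(sizeSaysSplit(sumSize, sizeToCheck));  // true
        // In the log the split is nonetheless skipped because the store file's midkey
        // equals its first or last row, so there is no usable split point.
      }
    }
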
2024-11-22T04:34:46,059 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 849eaa6fb87d20548eb5a1f32af954ff: 2024-11-22T04:34:46,061 INFO [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff., storeName=849eaa6fb87d20548eb5a1f32af954ff/info, priority=13, startTime=1732250085567; duration=0sec 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/d70d0b8732f04b82b129932bce89b6be because midkey is the same as first or last row 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/d70d0b8732f04b82b129932bce89b6be because midkey is the same as first or last row 2024-11-22T04:34:46,061 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-22T04:34:46,062 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:46,062 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/d70d0b8732f04b82b129932bce89b6be because midkey is the same as first or last row 2024-11-22T04:34:46,062 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:34:46,062 DEBUG [RS:0;8fc3ff0a63e6:33755-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 849eaa6fb87d20548eb5a1f32af954ff:info 2024-11-22T04:34:57,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33755 {}] regionserver.HRegion(8855): Flush requested on 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:34:57,609 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 849eaa6fb87d20548eb5a1f32af954ff 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:34:57,621 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/61a9aaa8404b4046bb511ef90e7718cd is 1080, key is row0022/info:/1732250085579/Put/seqid=0 2024-11-22T04:34:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741849_1025 (size=12509) 2024-11-22T04:34:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741849_1025 (size=12509) 2024-11-22T04:34:57,631 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/61a9aaa8404b4046bb511ef90e7718cd 2024-11-22T04:34:57,643 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/61a9aaa8404b4046bb511ef90e7718cd as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/61a9aaa8404b4046bb511ef90e7718cd 2024-11-22T04:34:57,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/61a9aaa8404b4046bb511ef90e7718cd, entries=7, sequenceid=42, filesize=12.2 K 2024-11-22T04:34:57,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 44ms, sequenceid=42, compaction requested=false 2024-11-22T04:34:57,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 849eaa6fb87d20548eb5a1f32af954ff: 2024-11-22T04:34:57,654 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-22T04:34:57,654 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:34:57,654 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/d70d0b8732f04b82b129932bce89b6be because midkey is the same as first or last row 2024-11-22T04:34:58,790 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
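
The flush at 04:34:57 is again requested by the MemStoreFlusher, but a flush can also be forced from a client, which is handy when reproducing this sequence by hand. A minimal sketch using Admin.flush against the same table; the class name FlushTestTable is hypothetical:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Flush the memstore of every region of the table to new HFiles, like the flushes above.
          admin.flush(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"));
        }
      }
    }
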
2024-11-22T04:35:03,252 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 849eaa6fb87d20548eb5a1f32af954ff, had cached 0 bytes from a total of 40219 2024-11-22T04:35:05,624 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:35:05,624 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T04:35:05,624 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
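
The call stack above shows the test's tearDown driving HBaseTestingUtil.shutdownMiniCluster, which produces the shutdown sequence that follows. A minimal JUnit 4 sketch of that setup/teardown pattern, assuming the hbase-testing-util module; the class name MyLogRollingTest is hypothetical:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;

    public class MyLogRollingTest {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        TEST_UTIL.startMiniCluster();    // local HDFS + ZooKeeper + HBase master and region server
      }

      @After
      public void tearDown() throws Exception {
        TEST_UTIL.shutdownMiniCluster(); // the call that triggers the shutdown sequence logged here
      }
    }
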
2024-11-22T04:35:05,630 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:05,631 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:05,631 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T04:35:05,631 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:35:05,631 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=764753395, stopped=false 2024-11-22T04:35:05,632 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,37977,1732250009659 2024-11-22T04:35:05,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:05,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:05,754 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:05,754 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:05,755 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:35:05,755 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:05,756 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:05,756 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:05,756 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:05,757 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,33755,1732250010424' ***** 2024-11-22T04:35:05,757 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:35:05,757 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:35:05,758 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:35:05,758 INFO [RS:0;8fc3ff0a63e6:33755 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:35:05,758 INFO [RS:0;8fc3ff0a63e6:33755 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:35:05,759 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(3091): Received CLOSE for 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:35:05,760 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:35:05,760 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:05,760 INFO [RS:0;8fc3ff0a63e6:33755 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:33755. 2024-11-22T04:35:05,760 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:05,760 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:05,760 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 849eaa6fb87d20548eb5a1f32af954ff, disabling compactions & flushes 2024-11-22T04:35:05,761 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:35:05,761 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:35:05,761 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:35:05,761 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. after waiting 0 ms 2024-11-22T04:35:05,761 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:35:05,761 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:35:05,761 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T04:35:05,761 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:35:05,761 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 849eaa6fb87d20548eb5a1f32af954ff 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-22T04:35:05,762 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T04:35:05,762 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:35:05,762 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 849eaa6fb87d20548eb5a1f32af954ff=TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.} 2024-11-22T04:35:05,763 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:35:05,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:35:05,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:35:05,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:35:05,763 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 849eaa6fb87d20548eb5a1f32af954ff 2024-11-22T04:35:05,763 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-22T04:35:05,769 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/367d5805061d48638624309161a3b801 is 1080, key is row0029/info:/1732250099612/Put/seqid=0 2024-11-22T04:35:05,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741850_1026 (size=8193) 2024-11-22T04:35:05,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741850_1026 (size=8193) 2024-11-22T04:35:05,777 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/367d5805061d48638624309161a3b801 2024-11-22T04:35:05,784 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/info/165f3ae78896444d98e3680db926d9d0 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff./info:regioninfo/1732250013277/Put/seqid=0 2024-11-22T04:35:05,787 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/.tmp/info/367d5805061d48638624309161a3b801 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/367d5805061d48638624309161a3b801 2024-11-22T04:35:05,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741851_1027 (size=7016) 2024-11-22T04:35:05,793 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741851_1027 (size=7016) 2024-11-22T04:35:05,794 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/info/165f3ae78896444d98e3680db926d9d0 2024-11-22T04:35:05,796 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/367d5805061d48638624309161a3b801, entries=3, sequenceid=48, filesize=8.0 K 2024-11-22T04:35:05,798 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 36ms, sequenceid=48, compaction requested=true 2024-11-22T04:35:05,798 DEBUG 
[StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e] to archive 2024-11-22T04:35:05,802 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T04:35:05,805 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/4724934b5f19471cb1873a522165a04a 2024-11-22T04:35:05,807 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/7b076d9eb1a3421aa5413f8d87828f8d 2024-11-22T04:35:05,809 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/archive/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/info/dadf22b0cb794a68a1f958a2a8cbc93e 2024-11-22T04:35:05,818 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/ns/8c42feff61944754b0d453f40dc6fd01 is 43, key is default/ns:d/1732250012569/Put/seqid=0 2024-11-22T04:35:05,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741852_1028 (size=5153) 2024-11-22T04:35:05,824 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741852_1028 (size=5153) 
2024-11-22T04:35:05,824 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/ns/8c42feff61944754b0d453f40dc6fd01 2024-11-22T04:35:05,820 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=8fc3ff0a63e6:37977 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] 
at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T04:35:05,824 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [4724934b5f19471cb1873a522165a04a=12509, 7b076d9eb1a3421aa5413f8d87828f8d=12509, dadf22b0cb794a68a1f958a2a8cbc93e=12509] 2024-11-22T04:35:05,830 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/default/TestLogRolling-testSlowSyncLogRolling/849eaa6fb87d20548eb5a1f32af954ff/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-22T04:35:05,832 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:35:05,832 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 849eaa6fb87d20548eb5a1f32af954ff: Waiting for close lock at 1732250105760Running coprocessor pre-close hooks at 1732250105760Disabling compacts and flushes for region at 1732250105760Disabling writes for close at 1732250105761 (+1 ms)Obtaining lock to block concurrent updates at 1732250105762 (+1 ms)Preparing flush snapshotting stores in 849eaa6fb87d20548eb5a1f32af954ff at 1732250105762Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732250105762Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 
at 1732250105764 (+2 ms)Flushing 849eaa6fb87d20548eb5a1f32af954ff/info: creating writer at 1732250105764Flushing 849eaa6fb87d20548eb5a1f32af954ff/info: appending metadata at 1732250105768 (+4 ms)Flushing 849eaa6fb87d20548eb5a1f32af954ff/info: closing flushed file at 1732250105768Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5da9a7d5: reopening flushed file at 1732250105785 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 849eaa6fb87d20548eb5a1f32af954ff in 36ms, sequenceid=48, compaction requested=true at 1732250105798 (+13 ms)Writing region close event to WAL at 1732250105825 (+27 ms)Running coprocessor post-close hooks at 1732250105830 (+5 ms)Closed at 1732250105832 (+2 ms) 2024-11-22T04:35:05,833 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732250012838.849eaa6fb87d20548eb5a1f32af954ff. 2024-11-22T04:35:05,849 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/table/c0813173177249dbbed854fc983444e9 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732250013301/Put/seqid=0 2024-11-22T04:35:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741853_1029 (size=5396) 2024-11-22T04:35:05,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741853_1029 (size=5396) 2024-11-22T04:35:05,856 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/table/c0813173177249dbbed854fc983444e9 2024-11-22T04:35:05,865 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/info/165f3ae78896444d98e3680db926d9d0 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/info/165f3ae78896444d98e3680db926d9d0 2024-11-22T04:35:05,874 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/info/165f3ae78896444d98e3680db926d9d0, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T04:35:05,875 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/ns/8c42feff61944754b0d453f40dc6fd01 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/ns/8c42feff61944754b0d453f40dc6fd01 2024-11-22T04:35:05,884 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/ns/8c42feff61944754b0d453f40dc6fd01, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T04:35:05,886 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/.tmp/table/c0813173177249dbbed854fc983444e9 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/table/c0813173177249dbbed854fc983444e9 2024-11-22T04:35:05,895 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/table/c0813173177249dbbed854fc983444e9, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T04:35:05,896 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 133ms, sequenceid=11, compaction requested=false 2024-11-22T04:35:05,902 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T04:35:05,903 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:35:05,904 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:05,904 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250105762Running coprocessor pre-close hooks at 1732250105762Disabling compacts and flushes for region at 1732250105762Disabling writes for close at 1732250105763 (+1 ms)Obtaining lock to block concurrent updates at 1732250105763Preparing flush snapshotting stores in 1588230740 at 1732250105763Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732250105764 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732250105765 (+1 ms)Flushing 1588230740/info: creating writer at 1732250105765Flushing 1588230740/info: appending metadata at 1732250105784 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732250105784Flushing 1588230740/ns: creating writer at 1732250105802 (+18 ms)Flushing 1588230740/ns: appending metadata at 1732250105817 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1732250105817Flushing 1588230740/table: creating writer at 1732250105835 (+18 ms)Flushing 1588230740/table: appending metadata at 1732250105849 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732250105849Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15d0a23a: reopening flushed file at 1732250105864 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a7246ab: reopening flushed file at 1732250105874 (+10 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@362f50ab: reopening flushed file at 1732250105884 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 133ms, sequenceid=11, compaction requested=false at 1732250105896 (+12 ms)Writing region close event to WAL at 1732250105898 (+2 ms)Running coprocessor post-close hooks at 1732250105903 (+5 ms)Closed at 1732250105904 (+1 ms) 2024-11-22T04:35:05,904 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:05,964 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,33755,1732250010424; all regions closed. 2024-11-22T04:35:05,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,967 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741834_1010 (size=3066) 2024-11-22T04:35:05,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741834_1010 (size=3066) 2024-11-22T04:35:05,977 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs 2024-11-22T04:35:05,977 INFO [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424.meta:.meta(num 1732250012359) 2024-11-22T04:35:05,978 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,978 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,978 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,978 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,978 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:05,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741847_1023 (size=12695) 2024-11-22T04:35:05,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741847_1023 (size=12695) 2024-11-22T04:35:05,985 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/oldWALs 2024-11-22T04:35:05,985 INFO [RS:0;8fc3ff0a63e6:33755 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C33755%2C1732250010424:(num 1732250085577) 2024-11-22T04:35:05,985 DEBUG [RS:0;8fc3ff0a63e6:33755 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:05,985 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:05,985 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:05,985 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore 
name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:05,986 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:05,986 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:35:05,986 INFO [RS:0;8fc3ff0a63e6:33755 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33755 2024-11-22T04:35:06,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:06,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,33755,1732250010424 2024-11-22T04:35:06,001 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:06,012 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,33755,1732250010424] 2024-11-22T04:35:06,022 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,33755,1732250010424 already deleted, retry=false 2024-11-22T04:35:06,022 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,33755,1732250010424 expired; onlineServers=0 2024-11-22T04:35:06,022 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,37977,1732250009659' ***** 2024-11-22T04:35:06,022 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:35:06,023 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:06,023 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:06,023 DEBUG [M:0;8fc3ff0a63e6:37977 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:35:06,023 DEBUG [M:0;8fc3ff0a63e6:37977 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:35:06,023 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T04:35:06,023 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250011577 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250011577,5,FailOnTimeoutGroup] 2024-11-22T04:35:06,023 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250011576 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250011576,5,FailOnTimeoutGroup] 2024-11-22T04:35:06,023 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:06,024 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:06,024 DEBUG [M:0;8fc3ff0a63e6:37977 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:35:06,024 INFO [M:0;8fc3ff0a63e6:37977 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:35:06,024 INFO [M:0;8fc3ff0a63e6:37977 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:06,025 INFO [M:0;8fc3ff0a63e6:37977 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:35:06,025 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:35:06,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:35:06,033 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:06,033 DEBUG [M:0;8fc3ff0a63e6:37977 {}] zookeeper.ZKUtil(347): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T04:35:06,033 WARN [M:0;8fc3ff0a63e6:37977 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T04:35:06,034 INFO [M:0;8fc3ff0a63e6:37977 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/.lastflushedseqids 2024-11-22T04:35:06,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741854_1030 (size=130) 2024-11-22T04:35:06,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741854_1030 (size=130) 2024-11-22T04:35:06,046 INFO [M:0;8fc3ff0a63e6:37977 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:35:06,046 INFO [M:0;8fc3ff0a63e6:37977 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:35:06,046 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:35:06,046 INFO [M:0;8fc3ff0a63e6:37977 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:06,046 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:06,046 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:35:06,046 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:06,046 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.04 KB heapSize=29.21 KB 2024-11-22T04:35:06,063 DEBUG [M:0;8fc3ff0a63e6:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1a95f49b699f4a4691adf118978b5ba0 is 82, key is hbase:meta,,1/info:regioninfo/1732250012450/Put/seqid=0 2024-11-22T04:35:06,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741855_1031 (size=5672) 2024-11-22T04:35:06,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741855_1031 (size=5672) 2024-11-22T04:35:06,069 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1a95f49b699f4a4691adf118978b5ba0 2024-11-22T04:35:06,097 DEBUG [M:0;8fc3ff0a63e6:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6de4326cc7fe46918571f54f9cab607c is 767, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732250013314/Put/seqid=0 2024-11-22T04:35:06,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741856_1032 (size=6248) 2024-11-22T04:35:06,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741856_1032 (size=6248) 2024-11-22T04:35:06,103 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.43 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6de4326cc7fe46918571f54f9cab607c 2024-11-22T04:35:06,110 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6de4326cc7fe46918571f54f9cab607c 2024-11-22T04:35:06,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:06,112 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33755-0x10160d178c10001, quorum=127.0.0.1:51545, baseZNode=/hbase 
Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:06,113 INFO [RS:0;8fc3ff0a63e6:33755 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:06,113 INFO [RS:0;8fc3ff0a63e6:33755 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,33755,1732250010424; zookeeper connection closed. 2024-11-22T04:35:06,113 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@355626fa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@355626fa 2024-11-22T04:35:06,114 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T04:35:06,126 DEBUG [M:0;8fc3ff0a63e6:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77b5b57b16cf4cc18fb16e270fbd26a1 is 69, key is 8fc3ff0a63e6,33755,1732250010424/rs:state/1732250011661/Put/seqid=0 2024-11-22T04:35:06,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741857_1033 (size=5156) 2024-11-22T04:35:06,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741857_1033 (size=5156) 2024-11-22T04:35:06,133 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77b5b57b16cf4cc18fb16e270fbd26a1 2024-11-22T04:35:06,159 DEBUG [M:0;8fc3ff0a63e6:37977 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7ed6888aeb1d4fdab509f25992594566 is 52, key is load_balancer_on/state:d/1732250012813/Put/seqid=0 2024-11-22T04:35:06,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741858_1034 (size=5056) 2024-11-22T04:35:06,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741858_1034 (size=5056) 2024-11-22T04:35:06,166 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7ed6888aeb1d4fdab509f25992594566 2024-11-22T04:35:06,175 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/1a95f49b699f4a4691adf118978b5ba0 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1a95f49b699f4a4691adf118978b5ba0 2024-11-22T04:35:06,184 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/1a95f49b699f4a4691adf118978b5ba0, entries=8, sequenceid=59, filesize=5.5 K 2024-11-22T04:35:06,186 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/6de4326cc7fe46918571f54f9cab607c as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6de4326cc7fe46918571f54f9cab607c 2024-11-22T04:35:06,194 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 6de4326cc7fe46918571f54f9cab607c 2024-11-22T04:35:06,194 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/6de4326cc7fe46918571f54f9cab607c, entries=6, sequenceid=59, filesize=6.1 K 2024-11-22T04:35:06,196 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/77b5b57b16cf4cc18fb16e270fbd26a1 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/77b5b57b16cf4cc18fb16e270fbd26a1 2024-11-22T04:35:06,203 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/77b5b57b16cf4cc18fb16e270fbd26a1, entries=1, sequenceid=59, filesize=5.0 K 2024-11-22T04:35:06,205 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7ed6888aeb1d4fdab509f25992594566 as hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7ed6888aeb1d4fdab509f25992594566 2024-11-22T04:35:06,212 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7ed6888aeb1d4fdab509f25992594566, entries=1, sequenceid=59, filesize=4.9 K 2024-11-22T04:35:06,214 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=59, compaction requested=false 2024-11-22T04:35:06,216 INFO [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:35:06,216 DEBUG [M:0;8fc3ff0a63e6:37977 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250106046Disabling compacts and flushes for region at 1732250106046Disabling writes for close at 1732250106046Obtaining lock to block concurrent updates at 1732250106047 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250106047Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23588, getHeapSize=29848, getOffHeapSize=0, getCellsCount=70 at 1732250106047Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250106048 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250106048Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250106063 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250106063Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250106075 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250106096 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250106096Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250106110 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250106126 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250106126Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250106139 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250106159 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250106159Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5da6c80: reopening flushed file at 1732250106173 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10f96f8f: reopening flushed file at 1732250106184 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6306c3d6: reopening flushed file at 1732250106194 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@554db608: reopening flushed file at 1732250106204 (+10 ms)Finished flush of dataSize ~23.04 KB/23588, heapSize ~29.15 KB/29848, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 168ms, sequenceid=59, compaction requested=false at 1732250106214 (+10 ms)Writing region close event to WAL at 1732250106215 (+1 ms)Closed at 1732250106215 2024-11-22T04:35:06,217 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:06,217 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:06,217 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:06,217 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:06,217 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41265 is added to blk_1073741830_1006 (size=27985) 2024-11-22T04:35:06,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35923 is added to blk_1073741830_1006 (size=27985) 2024-11-22T04:35:06,221 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
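The "Region close journal" entries above (for 849eaa6fb87d20548eb5a1f32af954ff, 1588230740, and the master's local store region) list each close step with its timestamp and, where the step took measurable time, a "(+N ms)" delta from the previous step. A small illustrative sketch of how such deltas are derived from step timestamps, reusing a few values from the journal above; this is not the HRegion implementation, which builds these strings internally:

```java
import java.util.LinkedHashMap;
import java.util.Map;

public class CloseJournalDeltaSketch {
  public static void main(String[] args) {
    // A subset of the steps and epoch-millisecond timestamps from the journal above.
    Map<String, Long> steps = new LinkedHashMap<>();
    steps.put("Waiting for close lock", 1732250106046L);
    steps.put("Obtaining lock to block concurrent updates", 1732250106047L);
    steps.put("Writing region close event to WAL", 1732250106215L);

    long previous = -1;
    for (Map.Entry<String, Long> step : steps.entrySet()) {
      long ts = step.getValue();
      // Delta is relative to the previous listed step; because intermediate steps are
      // skipped here, the last delta is larger than the per-step values in the journal.
      String delta = previous < 0 ? "" : " (+" + (ts - previous) + " ms)";
      System.out.println(step.getKey() + " at " + ts + delta);
      previous = ts;
    }
  }
}
```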
2024-11-22T04:35:06,221 INFO [M:0;8fc3ff0a63e6:37977 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T04:35:06,221 INFO [M:0;8fc3ff0a63e6:37977 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37977 2024-11-22T04:35:06,221 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:06,337 INFO [M:0;8fc3ff0a63e6:37977 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:06,337 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:37977-0x10160d178c10000, quorum=127.0.0.1:51545, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:06,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1467625d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:06,343 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@675921ed{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:06,344 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:06,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ec7bf2e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:06,344 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3369fbc0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:06,347 WARN [BP-1604185574-172.17.0.2-1732250004553 heartbeating to localhost/127.0.0.1:37209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:06,347 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:35:06,347 WARN [BP-1604185574-172.17.0.2-1732250004553 heartbeating to localhost/127.0.0.1:37209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604185574-172.17.0.2-1732250004553 (Datanode Uuid e63b29ae-ef87-4fe2-b19f-6fd426574dd2) service to localhost/127.0.0.1:37209 2024-11-22T04:35:06,347 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:06,348 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data3/current/BP-1604185574-172.17.0.2-1732250004553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:06,348 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data4/current/BP-1604185574-172.17.0.2-1732250004553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:06,349 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:06,351 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6c2fdbac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:06,351 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@461c65fd{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:06,351 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:06,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d13ec7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:06,352 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6355b7f5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:06,353 WARN [BP-1604185574-172.17.0.2-1732250004553 heartbeating to localhost/127.0.0.1:37209 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:06,353 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
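The ResourceChecker summary further below ("Thread=78 (was 12)") lists threads still alive when the test finished, each prefixed with "Potentially hanging thread:" and followed by its stack trace. A generic sketch of taking such a post-test thread inventory with plain JDK calls; this is not the HBase ResourceChecker implementation and applies no filtering or thresholds:

```java
import java.util.Map;

public class ThreadInventorySketch {
  public static void main(String[] args) {
    // Snapshot of every live thread and its current stack trace.
    Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
    System.out.println("Live threads: " + stacks.size());
    for (Map.Entry<Thread, StackTraceElement[]> entry : stacks.entrySet()) {
      System.out.println("Potentially hanging thread: " + entry.getKey().getName());
      for (StackTraceElement frame : entry.getValue()) {
        System.out.println("    " + frame);
      }
    }
  }
}
```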
2024-11-22T04:35:06,353 WARN [BP-1604185574-172.17.0.2-1732250004553 heartbeating to localhost/127.0.0.1:37209 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1604185574-172.17.0.2-1732250004553 (Datanode Uuid 3ca1c472-8fa9-46f4-b133-01adb121d757) service to localhost/127.0.0.1:37209 2024-11-22T04:35:06,353 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:06,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data1/current/BP-1604185574-172.17.0.2-1732250004553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:06,354 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/cluster_b39b6d22-2e5b-b2ff-faef-eea1fa5c46c0/data/data2/current/BP-1604185574-172.17.0.2-1732250004553 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:06,355 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:06,363 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:35:06,364 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:06,364 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:06,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:06,364 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:06,372 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:35:06,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:35:06,414 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=78 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/8fc3ff0a63e6:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@3e77a740 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/8fc3ff0a63e6:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:37209 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: master/8fc3ff0a63e6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/8fc3ff0a63e6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37209 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37209 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37209 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) 
app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:37209 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=265 (was 272), ProcessCount=11 (was 11), AvailableMemoryMB=9536 (was 10127) 2024-11-22T04:35:06,421 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=79, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=265, ProcessCount=11, AvailableMemoryMB=9535 2024-11-22T04:35:06,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:35:06,421 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.log.dir so I do NOT create it in target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/3d45c931-ec1a-9127-2fe4-b6b3a91579b4/hadoop.tmp.dir so I do NOT create it in target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423, deleteOnExit=true 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/test.cache.data in system properties and HBase conf 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.tmp.dir in system properties and HBase conf 
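[editor's note] The ResourceChecker report that ends just above ("Thread=78 (was 12) ... Thread LEAK? ... OpenFileDescriptor=402 (was 287)") is a before/after resource diff around the test. Below is an illustrative, JDK-only sketch of that idea; it is not the org.apache.hadoop.hbase.ResourceChecker implementation, only the pattern of snapshotting live threads before the test and reporting survivors afterwards.

import java.util.Set;

public class ThreadLeakSketch {
    public static Set<Thread> snapshot() {
        // getAllStackTraces() returns a fresh map; its key set is the set of live threads
        return Thread.getAllStackTraces().keySet();
    }

    public static void report(Set<Thread> before, Set<Thread> after) {
        System.out.printf("Thread=%d (was %d)%n", after.size(), before.size());
        for (Thread t : after) {
            if (!before.contains(t)) { // survived past the test: potentially hanging
                System.out.println("Potentially hanging thread: " + t.getName());
                for (StackTraceElement e : t.getStackTrace()) {
                    System.out.println("    " + e);
                }
            }
        }
    }

    public static void main(String[] args) {
        Set<Thread> before = snapshot();
        // ... run the test body here ...
        Set<Thread> after = snapshot();
        report(before, after);
    }
}

The log's property-setting entries for the next test continue directly below.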
2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:35:06,422 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:35:06,423 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/yarn.nodemanager.remote-app-log-dir in system 
properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:35:06,423 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:35:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:35:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:35:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:35:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:35:06,424 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:35:06,437 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:35:06,792 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:06,800 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:06,802 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:06,802 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:06,802 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:35:06,805 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:06,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6b5fac92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:06,806 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:06,920 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4e15a6d0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/java.io.tmpdir/jetty-localhost-42153-hadoop-hdfs-3_4_1-tests_jar-_-any-3868862955987646874/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:35:06,921 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2152d149{HTTP/1.1, (http/1.1)}{localhost:42153} 2024-11-22T04:35:06,921 INFO [Time-limited test {}] server.Server(415): Started @104187ms 2024-11-22T04:35:06,933 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:35:07,194 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:07,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:07,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:07,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:07,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:07,201 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:07,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:07,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@36505daf{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/java.io.tmpdir/jetty-localhost-38629-hadoop-hdfs-3_4_1-tests_jar-_-any-1141532254047452768/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:07,321 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:38629} 2024-11-22T04:35:07,321 INFO [Time-limited test {}] server.Server(415): Started @104587ms 2024-11-22T04:35:07,322 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:07,355 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:07,359 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:07,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:07,360 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:07,361 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:07,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:07,361 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:07,461 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@af33574{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/java.io.tmpdir/jetty-localhost-46543-hadoop-hdfs-3_4_1-tests_jar-_-any-15345436133362356770/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:07,461 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:46543} 2024-11-22T04:35:07,461 INFO [Time-limited test {}] server.Server(415): Started @104728ms 2024-11-22T04:35:07,463 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:07,751 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:08,533 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data1/current/BP-1113426834-172.17.0.2-1732250106449/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:08,533 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data2/current/BP-1113426834-172.17.0.2-1732250106449/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:08,549 WARN [Thread-416 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:08,552 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f29f26e18770996 with lease ID 0x32fbc02b4d0e0c4b: Processing first storage report for DS-0d9fee1a-1a68-4011-82d7-2a63cc0f03fe from datanode DatanodeRegistration(127.0.0.1:35711, datanodeUuid=c07a79a7-13db-4a9f-8536-4df37f5341af, infoPort=44263, infoSecurePort=0, ipcPort=45723, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449) 2024-11-22T04:35:08,552 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f29f26e18770996 with lease ID 0x32fbc02b4d0e0c4b: from storage DS-0d9fee1a-1a68-4011-82d7-2a63cc0f03fe node DatanodeRegistration(127.0.0.1:35711, datanodeUuid=c07a79a7-13db-4a9f-8536-4df37f5341af, infoPort=44263, infoSecurePort=0, ipcPort=45723, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:08,552 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8f29f26e18770996 with lease ID 0x32fbc02b4d0e0c4b: Processing first storage report for DS-7e208939-400f-4cf8-b6ce-9399a97774e7 from datanode DatanodeRegistration(127.0.0.1:35711, datanodeUuid=c07a79a7-13db-4a9f-8536-4df37f5341af, infoPort=44263, infoSecurePort=0, ipcPort=45723, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449) 2024-11-22T04:35:08,552 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8f29f26e18770996 with lease ID 0x32fbc02b4d0e0c4b: from storage DS-7e208939-400f-4cf8-b6ce-9399a97774e7 node DatanodeRegistration(127.0.0.1:35711, datanodeUuid=c07a79a7-13db-4a9f-8536-4df37f5341af, infoPort=44263, infoSecurePort=0, ipcPort=45723, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:08,675 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data3/current/BP-1113426834-172.17.0.2-1732250106449/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:08,675 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data4/current/BP-1113426834-172.17.0.2-1732250106449/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:08,690 WARN [Thread-439 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96afb6cbf52b7df7 with lease ID 0x32fbc02b4d0e0c4c: Processing first storage report for DS-8121d4f5-10bb-41d5-bd7f-d4bfac4b030e from datanode DatanodeRegistration(127.0.0.1:36823, datanodeUuid=e2c426cd-f6e5-4600-9404-6b3abc1c3eda, infoPort=33677, infoSecurePort=0, ipcPort=41683, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449) 2024-11-22T04:35:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96afb6cbf52b7df7 with lease ID 0x32fbc02b4d0e0c4c: from storage DS-8121d4f5-10bb-41d5-bd7f-d4bfac4b030e node DatanodeRegistration(127.0.0.1:36823, datanodeUuid=e2c426cd-f6e5-4600-9404-6b3abc1c3eda, infoPort=33677, infoSecurePort=0, ipcPort=41683, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:35:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x96afb6cbf52b7df7 with lease ID 0x32fbc02b4d0e0c4c: Processing first storage report for DS-f668d553-38c1-42b8-a479-189659900a19 from datanode DatanodeRegistration(127.0.0.1:36823, datanodeUuid=e2c426cd-f6e5-4600-9404-6b3abc1c3eda, infoPort=33677, infoSecurePort=0, ipcPort=41683, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449) 2024-11-22T04:35:08,693 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x96afb6cbf52b7df7 with lease ID 0x32fbc02b4d0e0c4c: from storage DS-f668d553-38c1-42b8-a479-189659900a19 node DatanodeRegistration(127.0.0.1:36823, datanodeUuid=e2c426cd-f6e5-4600-9404-6b3abc1c3eda, infoPort=33677, infoSecurePort=0, ipcPort=41683, storageInfo=lv=-57;cid=testClusterID;nsid=1912349693;c=1732250106449), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:08,702 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9 2024-11-22T04:35:08,705 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/zookeeper_0, clientPort=54000, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:35:08,706 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54000 2024-11-22T04:35:08,706 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
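For readers following the run, the pieces logged above (a local HDFS with two datanodes, the MiniZooKeeperCluster on client port 54000, and hbase.rootdir under the test-data directory) are normally brought up from a test through HBaseTestingUtil. The sketch below is illustrative only and is not the test that produced this log; it assumes the hbase-testing-util artifact and default start options.

// Minimal sketch, assuming org.apache.hadoop.hbase.HBaseTestingUtil from
// hbase-testing-util; hypothetical, not the actual test behind this log.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Starts an in-process HDFS, a MiniZooKeeperCluster, one HMaster and one
    // RegionServer with default options, producing startup entries like the
    // ones above (test-data dirs, ZK client port, hbase.rootdir).
    util.startMiniCluster();
    try {
      // ... exercise the cluster here ...
    } finally {
      util.shutdownMiniCluster();  // tears everything down and removes test dirs
    }
  }
}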
2024-11-22T04:35:08,707 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:08,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:35:08,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:35:08,720 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d with version=8 2024-11-22T04:35:08,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:35:08,722 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:35:08,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:35:08,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,723 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:35:08,723 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:35:08,723 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:35:08,724 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41745 2024-11-22T04:35:08,725 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41745 connecting to ZooKeeper ensemble=127.0.0.1:54000 2024-11-22T04:35:08,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417450x0, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:35:08,783 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41745-0x10160d2fecc0000 connected 2024-11-22T04:35:08,869 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:08,871 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:08,875 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:08,875 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d, hbase.cluster.distributed=false 2024-11-22T04:35:08,878 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:35:08,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41745 2024-11-22T04:35:08,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41745 2024-11-22T04:35:08,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41745 2024-11-22T04:35:08,879 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41745 2024-11-22T04:35:08,880 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41745 2024-11-22T04:35:08,898 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:35:08,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,898 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:35:08,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:08,898 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:35:08,899 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:35:08,899 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:35:08,899 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37379 2024-11-22T04:35:08,902 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37379 connecting to ZooKeeper ensemble=127.0.0.1:54000 
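The RPC executor lines above (handlerCount=3, maxQueueLength=30 for the FPBQ/RWQ call queues) reflect standard HBase sizing properties. The snippet below only illustrates which configuration keys commonly drive those numbers; whether this particular test sets them explicitly is an assumption, and the values simply mirror the log.

// Hedged illustration of the sizing keys behind the logged RPC executor setup;
// the explicit set() calls are hypothetical, values copied from the log above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RpcSizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.regionserver.handler.count", 3);       // logged handlerCount=3
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30); // logged maxQueueLength=30
    System.out.println("handlers=" + conf.getInt("hbase.regionserver.handler.count", -1)
        + ", maxQueueLength=" + conf.getInt("hbase.ipc.server.max.callqueue.length", -1));
  }
}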
2024-11-22T04:35:08,903 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:08,906 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:08,921 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:373790x0, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:35:08,922 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37379-0x10160d2fecc0001 connected 2024-11-22T04:35:08,922 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:08,923 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:35:08,923 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:35:08,924 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:35:08,925 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:35:08,928 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37379 2024-11-22T04:35:08,928 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37379 2024-11-22T04:35:08,929 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37379 2024-11-22T04:35:08,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37379 2024-11-22T04:35:08,931 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37379 2024-11-22T04:35:08,947 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:41745 2024-11-22T04:35:08,947 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:08,960 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:08,960 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:08,961 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:08,970 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:35:08,970 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:08,970 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:08,971 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:35:08,971 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,41745,1732250108722 from backup master directory 2024-11-22T04:35:08,981 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:08,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:08,981 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T04:35:08,981 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:08,981 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:08,986 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/hbase.id] with ID: 7a07c6cf-0b27-4822-ae47-1425aa358901 2024-11-22T04:35:08,986 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/.tmp/hbase.id 2024-11-22T04:35:08,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:35:08,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:35:08,995 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/.tmp/hbase.id]:[hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/hbase.id] 2024-11-22T04:35:09,011 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:09,011 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:35:09,013 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
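Once the cluster ID file is in place, the mini cluster is reachable like any other HBase deployment via the ZooKeeper ensemble logged above (127.0.0.1:54000). A hypothetical client connection, using only the quorum and client port taken from the log, would look roughly like this:

// Illustrative client sketch; quorum/port come from the log above, everything
// else is an assumption about how one might inspect the mini cluster.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClientSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 54000); // from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Right after startup only system tables exist, so this prints little.
      for (TableName tn : admin.listTableNames()) {
        System.out.println(tn);
      }
    }
  }
}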
2024-11-22T04:35:09,026 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:35:09,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:35:09,035 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:35:09,035 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:35:09,036 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:09,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:35:09,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:35:09,048 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store 2024-11-22T04:35:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:35:09,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:35:09,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:09,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:35:09,057 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250109057Disabling compacts and flushes for region at 1732250109057Disabling writes for close at 1732250109057Writing region close event to WAL at 1732250109057Closed at 1732250109057 2024-11-22T04:35:09,058 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/.initializing 2024-11-22T04:35:09,059 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/WALs/8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:09,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C41745%2C1732250108722, suffix=, logDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/WALs/8fc3ff0a63e6,41745,1732250108722, archiveDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/oldWALs, maxLogs=10 2024-11-22T04:35:09,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C41745%2C1732250108722.1732250109062 2024-11-22T04:35:09,067 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/WALs/8fc3ff0a63e6,41745,1732250108722/8fc3ff0a63e6%2C41745%2C1732250108722.1732250109062 2024-11-22T04:35:09,068 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33677:33677),(127.0.0.1/127.0.0.1:44263:44263)] 2024-11-22T04:35:09,072 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:35:09,072 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:09,072 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,072 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,074 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,076 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:35:09,076 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,077 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:35:09,080 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:09,080 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,083 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:35:09,084 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:09,085 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:35:09,087 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:09,088 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,089 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,090 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,091 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,091 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,092 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:35:09,093 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:09,096 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:35:09,096 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731426, jitterRate=-0.06994377076625824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:35:09,098 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250109072Initializing all the Stores at 1732250109073 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109073Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250109074 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250109074Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250109074Cleaning up temporary data from old regions at 1732250109091 (+17 ms)Region opened successfully at 1732250109097 (+6 ms) 2024-11-22T04:35:09,098 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:35:09,102 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@210ff31f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:35:09,103 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:35:09,103 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:35:09,103 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:35:09,104 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:35:09,104 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:35:09,105 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:35:09,105 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:35:09,107 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:35:09,108 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:35:09,118 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:35:09,118 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:35:09,119 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:35:09,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:35:09,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:35:09,133 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:35:09,142 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:35:09,143 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:35:09,153 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:35:09,156 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:35:09,163 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:35:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:09,174 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:09,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,174 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,175 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,41745,1732250108722, sessionid=0x10160d2fecc0000, setting cluster-up flag (Was=false) 2024-11-22T04:35:09,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,195 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,226 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:35:09,229 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:09,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,248 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,279 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:35:09,281 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:09,283 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:35:09,285 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:09,286 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:35:09,286 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:35:09,286 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,41745,1732250108722 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:35:09,288 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:09,288 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:09,288 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:09,289 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:09,289 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:35:09,289 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,289 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:35:09,289 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:35:09,290 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250139290 2024-11-22T04:35:09,290 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:35:09,290 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,291 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:35:09,291 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:09,292 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:35:09,292 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:35:09,292 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:35:09,292 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:35:09,292 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:35:09,293 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250109292,5,FailOnTimeoutGroup] 2024-11-22T04:35:09,293 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250109293,5,FailOnTimeoutGroup] 2024-11-22T04:35:09,293 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,293 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:35:09,293 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,293 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,293 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,293 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:35:09,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:35:09,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:35:09,303 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:35:09,304 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d 2024-11-22T04:35:09,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:35:09,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:35:09,313 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:09,315 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:35:09,316 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:35:09,316 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,317 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:35:09,319 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:35:09,319 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,319 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,320 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:35:09,321 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:35:09,321 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,322 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:35:09,323 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:35:09,324 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,324 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,324 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:35:09,325 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740 2024-11-22T04:35:09,326 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740 2024-11-22T04:35:09,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:35:09,327 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:35:09,328 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:35:09,330 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:35:09,332 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:35:09,333 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=713978, jitterRate=-0.09213057160377502}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:35:09,334 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(746): ClusterId : 7a07c6cf-0b27-4822-ae47-1425aa358901 2024-11-22T04:35:09,334 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:35:09,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250109313Initializing all the Stores at 1732250109314 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109314Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109314Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250109314Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109314Cleaning up temporary data from old regions at 1732250109327 (+13 ms)Region opened successfully at 1732250109335 (+8 ms) 2024-11-22T04:35:09,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:35:09,335 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:35:09,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:35:09,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:35:09,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:35:09,336 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-22T04:35:09,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250109335Disabling compacts and flushes for region at 1732250109335Disabling writes for close at 1732250109335Writing region close event to WAL at 1732250109336 (+1 ms)Closed at 1732250109336 2024-11-22T04:35:09,338 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:09,338 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:35:09,338 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:35:09,340 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:35:09,342 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:35:09,343 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:35:09,343 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:35:09,354 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:35:09,354 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d06858c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:35:09,373 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:37379 2024-11-22T04:35:09,374 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:35:09,374 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:35:09,374 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T04:35:09,375 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,41745,1732250108722 with port=37379, startcode=1732250108897 2024-11-22T04:35:09,375 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:35:09,379 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47337, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:35:09,380 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41745 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,380 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41745 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,383 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d 2024-11-22T04:35:09,383 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38711 2024-11-22T04:35:09,383 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:35:09,395 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:09,396 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] zookeeper.ZKUtil(111): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,396 WARN [RS:0;8fc3ff0a63e6:37379 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:35:09,397 INFO [RS:0;8fc3ff0a63e6:37379 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:09,397 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/WALs/8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,397 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,37379,1732250108897] 2024-11-22T04:35:09,405 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:35:09,409 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:35:09,411 INFO [RS:0;8fc3ff0a63e6:37379 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:35:09,411 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:09,411 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:35:09,412 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:35:09,413 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,413 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,413 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,413 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,413 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,413 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:09,414 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:09,419 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:09,420 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,420 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,420 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,420 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,420 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37379,1732250108897-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:35:09,436 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:35:09,436 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37379,1732250108897-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,436 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,436 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.Replication(171): 8fc3ff0a63e6,37379,1732250108897 started 2024-11-22T04:35:09,453 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:09,453 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,37379,1732250108897, RpcServer on 8fc3ff0a63e6/172.17.0.2:37379, sessionid=0x10160d2fecc0001 2024-11-22T04:35:09,453 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:35:09,454 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,454 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37379,1732250108897' 2024-11-22T04:35:09,454 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:35:09,454 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:35:09,455 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:35:09,455 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:35:09,455 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,455 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37379,1732250108897' 2024-11-22T04:35:09,455 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:35:09,456 DEBUG 
[RS:0;8fc3ff0a63e6:37379 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:35:09,456 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:35:09,457 INFO [RS:0;8fc3ff0a63e6:37379 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:35:09,457 INFO [RS:0;8fc3ff0a63e6:37379 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:35:09,492 WARN [8fc3ff0a63e6:41745 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:35:09,560 INFO [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37379%2C1732250108897, suffix=, logDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/WALs/8fc3ff0a63e6,37379,1732250108897, archiveDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/oldWALs, maxLogs=32 2024-11-22T04:35:09,561 INFO [RS:0;8fc3ff0a63e6:37379 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37379%2C1732250108897.1732250109561 2024-11-22T04:35:09,568 INFO [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/WALs/8fc3ff0a63e6,37379,1732250108897/8fc3ff0a63e6%2C37379%2C1732250108897.1732250109561 2024-11-22T04:35:09,569 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33677:33677),(127.0.0.1/127.0.0.1:44263:44263)] 2024-11-22T04:35:09,742 DEBUG [8fc3ff0a63e6:41745 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:35:09,743 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:09,747 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37379,1732250108897, state=OPENING 2024-11-22T04:35:09,795 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:35:09,805 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,805 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:09,807 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:09,807 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:35:09,807 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:09,807 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37379,1732250108897}] 2024-11-22T04:35:09,964 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:35:09,971 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37943, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:35:09,976 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:35:09,976 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:09,979 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37379%2C1732250108897.meta, suffix=.meta, logDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/WALs/8fc3ff0a63e6,37379,1732250108897, archiveDir=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/oldWALs, maxLogs=32 2024-11-22T04:35:09,982 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37379%2C1732250108897.meta.1732250109982.meta 2024-11-22T04:35:09,988 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/WALs/8fc3ff0a63e6,37379,1732250108897/8fc3ff0a63e6%2C37379%2C1732250108897.meta.1732250109982.meta 2024-11-22T04:35:09,989 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44263:44263),(127.0.0.1/127.0.0.1:33677:33677)] 2024-11-22T04:35:09,990 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:35:09,990 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:35:09,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:35:09,991 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-22T04:35:09,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:35:09,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:09,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:35:09,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:35:09,993 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:35:09,993 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:35:09,994 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,994 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:35:09,995 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:35:09,995 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,996 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:35:09,997 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:35:09,997 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:09,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:35:09,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:09,998 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:35:09,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-22T04:35:09,999 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:35:09,999 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:10,000 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:10,000 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:35:10,001 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740 2024-11-22T04:35:10,002 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740 2024-11-22T04:35:10,004 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:35:10,004 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:35:10,005 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:35:10,006 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:35:10,007 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=777503, jitterRate=-0.011355027556419373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:35:10,008 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:35:10,008 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250109991Writing region info on filesystem at 1732250109991Initializing all the Stores at 1732250109992 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109992Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109992Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250109992Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250109992Cleaning up temporary data from old regions at 1732250110004 (+12 ms)Running coprocessor post-open hooks at 1732250110008 (+4 ms)Region opened successfully at 1732250110008 2024-11-22T04:35:10,010 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250109963 2024-11-22T04:35:10,012 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:35:10,013 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:35:10,013 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:10,015 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37379,1732250108897, state=OPEN 2024-11-22T04:35:10,056 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:35:10,056 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:35:10,057 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:10,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:10,057 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:10,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:35:10,062 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37379,1732250108897 in 250 msec 2024-11-22T04:35:10,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:35:10,067 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 724 msec 2024-11-22T04:35:10,069 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:10,069 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:35:10,071 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:35:10,072 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37379,1732250108897, seqNum=-1] 2024-11-22T04:35:10,072 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:35:10,074 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54375, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:35:10,084 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 798 msec 2024-11-22T04:35:10,084 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250110084, completionTime=-1 2024-11-22T04:35:10,084 INFO 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:35:10,084 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:35:10,086 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:35:10,086 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250170086 2024-11-22T04:35:10,086 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250230086 2024-11-22T04:35:10,086 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:41745, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,089 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.111sec 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:35:10,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:35:10,095 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:35:10,095 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:35:10,095 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41745,1732250108722-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:10,135 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@222181c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:10,135 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,41745,-1 for getting cluster id 2024-11-22T04:35:10,135 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:35:10,137 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7a07c6cf-0b27-4822-ae47-1425aa358901' 2024-11-22T04:35:10,138 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:35:10,138 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7a07c6cf-0b27-4822-ae47-1425aa358901" 2024-11-22T04:35:10,139 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60420209, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:10,139 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,41745,-1] 2024-11-22T04:35:10,140 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:35:10,140 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,143 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59028, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:35:10,144 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:10,145 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:35:10,146 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37379,1732250108897, seqNum=-1] 2024-11-22T04:35:10,147 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:35:10,151 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46728, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:35:10,154 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:10,154 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:10,157 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:35:10,158 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:35:10,158 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T04:35:10,158 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:10,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,158 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,158 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T04:35:10,158 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:35:10,158 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1972713769, stopped=false 2024-11-22T04:35:10,158 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,41745,1732250108722 2024-11-22T04:35:10,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:10,181 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:10,181 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:10,181 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:10,181 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:10,181 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
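Shutdown is signalled through ZooKeeper: deleting /hbase/running produces the NodeDeleted events that both the master and regionserver watchers log above. A small sketch of watching that znode with the plain ZooKeeper client, assuming the quorum address shown in this log (127.0.0.1:54000); the session timeout and printed message are illustrative:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the test quorum; the default watcher here ignores connection events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54000", 30_000, event -> { });
    // exists() registers a one-shot watch whether or not /hbase/running currently exists.
    zk.exists("/hbase/running", (WatchedEvent event) -> {
      // HBase servers treat NodeDeleted on /hbase/running as the cluster-shutdown signal,
      // which is what the ZKWatcher entries above record.
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("cluster shutdown requested");
      }
    });
    Thread.sleep(60_000); // keep the session alive long enough to observe the event
    zk.close();
  }
}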
2024-11-22T04:35:10,181 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:10,182 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,182 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:10,182 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:10,182 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,37379,1732250108897' ***** 2024-11-22T04:35:10,182 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:35:10,182 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:35:10,182 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:37379. 2024-11-22T04:35:10,183 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:10,183 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:35:10,183 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-22T04:35:10,183 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-22T04:35:10,184 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-22T04:35:10,184 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:35:10,184 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:35:10,184 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:35:10,184 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:35:10,184 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:35:10,184 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-22T04:35:10,200 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/.tmp/ns/4430eb9639a9437b83e99e1e62dd3c90 is 43, key is default/ns:d/1732250110075/Put/seqid=0 2024-11-22T04:35:10,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741835_1011 (size=5153) 2024-11-22T04:35:10,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741835_1011 (size=5153) 2024-11-22T04:35:10,206 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/.tmp/ns/4430eb9639a9437b83e99e1e62dd3c90 2024-11-22T04:35:10,214 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/.tmp/ns/4430eb9639a9437b83e99e1e62dd3c90 as hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/ns/4430eb9639a9437b83e99e1e62dd3c90 2024-11-22T04:35:10,221 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/ns/4430eb9639a9437b83e99e1e62dd3c90, entries=2, sequenceid=6, filesize=5.0 K 2024-11-22T04:35:10,223 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-11-22T04:35:10,223 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T04:35:10,228 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-22T04:35:10,228 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:35:10,228 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:10,229 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250110184Running coprocessor pre-close hooks at 1732250110184Disabling compacts and flushes for region at 1732250110184Disabling writes for close at 1732250110184Obtaining lock to block concurrent updates at 1732250110184Preparing flush snapshotting stores in 1588230740 at 1732250110184Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732250110184Flushing stores of hbase:meta,,1.1588230740 at 1732250110185 (+1 ms)Flushing 1588230740/ns: creating writer at 1732250110185Flushing 1588230740/ns: appending metadata at 1732250110199 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732250110200 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@72fb5af1: reopening flushed file at 1732250110213 (+13 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1732250110223 (+10 ms)Writing region close event to WAL at 1732250110224 (+1 ms)Running coprocessor post-close hooks at 1732250110228 (+4 ms)Closed at 1732250110228 2024-11-22T04:35:10,229 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:10,384 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,37379,1732250108897; all regions closed. 
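Before the meta region closes, it flushes its small memstore (74 B in the ns family) to an HFile under .tmp/, commits the file into the store, and records the new max sequence id in recovered.edits/9.seqid. The close handler does this internally; purely as an illustration of the same flush operation driven from a client, a hedged sketch using the Admin API (connection settings are assumed, not taken from this log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class MetaFlushSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at the running (test) cluster.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Asks the regionserver hosting hbase:meta to flush its memstores to HFiles,
      // the same kind of flush the close path above performs before closing 1588230740.
      admin.flush(TableName.META_TABLE_NAME);
    }
  }
}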
2024-11-22T04:35:10,385 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,387 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,387 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741834_1010 (size=1152) 2024-11-22T04:35:10,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741834_1010 (size=1152) 2024-11-22T04:35:10,398 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/oldWALs 2024-11-22T04:35:10,398 INFO [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37379%2C1732250108897.meta:.meta(num 1732250109982) 2024-11-22T04:35:10,399 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,399 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,399 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,399 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,400 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741833_1009 (size=93) 2024-11-22T04:35:10,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741833_1009 (size=93) 2024-11-22T04:35:10,405 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/oldWALs 2024-11-22T04:35:10,405 INFO [RS:0;8fc3ff0a63e6:37379 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37379%2C1732250108897:(num 1732250109561) 2024-11-22T04:35:10,405 DEBUG [RS:0;8fc3ff0a63e6:37379 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:10,405 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:10,405 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:10,405 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:10,406 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:10,406 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:35:10,406 INFO [RS:0;8fc3ff0a63e6:37379 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37379 2024-11-22T04:35:10,416 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,37379,1732250108897 2024-11-22T04:35:10,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:10,416 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:10,427 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,37379,1732250108897] 2024-11-22T04:35:10,437 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,37379,1732250108897 already deleted, retry=false 2024-11-22T04:35:10,437 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,37379,1732250108897 expired; onlineServers=0 2024-11-22T04:35:10,437 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,41745,1732250108722' ***** 2024-11-22T04:35:10,437 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:35:10,438 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:10,438 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:10,438 DEBUG [M:0;8fc3ff0a63e6:41745 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:35:10,438 DEBUG [M:0;8fc3ff0a63e6:41745 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:35:10,438 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T04:35:10,439 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250109292 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250109292,5,FailOnTimeoutGroup] 2024-11-22T04:35:10,439 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250109293 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250109293,5,FailOnTimeoutGroup] 2024-11-22T04:35:10,439 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:10,440 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:10,440 DEBUG [M:0;8fc3ff0a63e6:41745 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:35:10,440 INFO [M:0;8fc3ff0a63e6:41745 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:35:10,440 INFO [M:0;8fc3ff0a63e6:41745 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:10,441 INFO [M:0;8fc3ff0a63e6:41745 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:35:10,441 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:35:10,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:35:10,448 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:10,448 DEBUG [M:0;8fc3ff0a63e6:41745 {}] zookeeper.ZKUtil(347): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T04:35:10,448 WARN [M:0;8fc3ff0a63e6:41745 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T04:35:10,449 INFO [M:0;8fc3ff0a63e6:41745 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/.lastflushedseqids 2024-11-22T04:35:10,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741836_1012 (size=99) 2024-11-22T04:35:10,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741836_1012 (size=99) 2024-11-22T04:35:10,458 INFO [M:0;8fc3ff0a63e6:41745 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:35:10,459 INFO [M:0;8fc3ff0a63e6:41745 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:35:10,459 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:35:10,459 INFO [M:0;8fc3ff0a63e6:41745 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:10,459 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:10,459 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:35:10,459 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:10,459 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-22T04:35:10,479 DEBUG [M:0;8fc3ff0a63e6:41745 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3a3e656e29641c5b32be6c58f778e54 is 82, key is hbase:meta,,1/info:regioninfo/1732250110013/Put/seqid=0 2024-11-22T04:35:10,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741837_1013 (size=5672) 2024-11-22T04:35:10,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741837_1013 (size=5672) 2024-11-22T04:35:10,484 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3a3e656e29641c5b32be6c58f778e54 2024-11-22T04:35:10,505 DEBUG [M:0;8fc3ff0a63e6:41745 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e281a6b01b24681bf737d3a7179017f is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732250110082/Put/seqid=0 2024-11-22T04:35:10,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741838_1014 (size=5275) 2024-11-22T04:35:10,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741838_1014 (size=5275) 2024-11-22T04:35:10,511 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e281a6b01b24681bf737d3a7179017f 2024-11-22T04:35:10,527 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:10,527 INFO [RS:0;8fc3ff0a63e6:37379 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:10,527 DEBUG [pool-180-thread-1-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37379-0x10160d2fecc0001, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 
2024-11-22T04:35:10,527 INFO [RS:0;8fc3ff0a63e6:37379 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,37379,1732250108897; zookeeper connection closed. 2024-11-22T04:35:10,527 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3fe1800e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3fe1800e 2024-11-22T04:35:10,527 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T04:35:10,531 DEBUG [M:0;8fc3ff0a63e6:41745 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00efc2be6a404194bf53339ca59176ac is 69, key is 8fc3ff0a63e6,37379,1732250108897/rs:state/1732250109380/Put/seqid=0 2024-11-22T04:35:10,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741839_1015 (size=5156) 2024-11-22T04:35:10,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741839_1015 (size=5156) 2024-11-22T04:35:10,541 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00efc2be6a404194bf53339ca59176ac 2024-11-22T04:35:10,561 DEBUG [M:0;8fc3ff0a63e6:41745 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/994478275b914a988a56aba4685858a2 is 52, key is load_balancer_on/state:d/1732250110156/Put/seqid=0 2024-11-22T04:35:10,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741840_1016 (size=5056) 2024-11-22T04:35:10,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741840_1016 (size=5056) 2024-11-22T04:35:10,568 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/994478275b914a988a56aba4685858a2 2024-11-22T04:35:10,574 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/a3a3e656e29641c5b32be6c58f778e54 as hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a3a3e656e29641c5b32be6c58f778e54 2024-11-22T04:35:10,581 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/a3a3e656e29641c5b32be6c58f778e54, entries=8, sequenceid=29, filesize=5.5 K 2024-11-22T04:35:10,582 DEBUG 
[M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7e281a6b01b24681bf737d3a7179017f as hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e281a6b01b24681bf737d3a7179017f 2024-11-22T04:35:10,588 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7e281a6b01b24681bf737d3a7179017f, entries=3, sequenceid=29, filesize=5.2 K 2024-11-22T04:35:10,589 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/00efc2be6a404194bf53339ca59176ac as hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00efc2be6a404194bf53339ca59176ac 2024-11-22T04:35:10,595 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/00efc2be6a404194bf53339ca59176ac, entries=1, sequenceid=29, filesize=5.0 K 2024-11-22T04:35:10,597 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/994478275b914a988a56aba4685858a2 as hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/994478275b914a988a56aba4685858a2 2024-11-22T04:35:10,604 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38711/user/jenkins/test-data/1bdc3f30-cf0a-685d-e76c-7cff24288d1d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/994478275b914a988a56aba4685858a2, entries=1, sequenceid=29, filesize=4.9 K 2024-11-22T04:35:10,605 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false 2024-11-22T04:35:10,607 INFO [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:10,607 DEBUG [M:0;8fc3ff0a63e6:41745 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250110459Disabling compacts and flushes for region at 1732250110459Disabling writes for close at 1732250110459Obtaining lock to block concurrent updates at 1732250110459Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250110459Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732250110460 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732250110461 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250110461Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250110479 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250110479Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250110490 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250110505 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250110505Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250110516 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250110530 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250110530Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250110547 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250110561 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250110561Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@58c8641b: reopening flushed file at 1732250110573 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2365264: reopening flushed file at 1732250110581 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f2a378d: reopening flushed file at 1732250110588 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5c2f0308: reopening flushed file at 1732250110596 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 146ms, sequenceid=29, compaction requested=false at 1732250110605 (+9 ms)Writing region close event to WAL at 1732250110607 (+2 ms)Closed at 1732250110607 2024-11-22T04:35:10,607 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,607 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,607 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,607 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,608 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:10,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35711 is added to blk_1073741830_1006 (size=10311) 2024-11-22T04:35:10,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741830_1006 (size=10311) 2024-11-22T04:35:10,610 INFO [M:0;8fc3ff0a63e6:41745 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T04:35:10,610 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:35:10,610 INFO [M:0;8fc3ff0a63e6:41745 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41745 2024-11-22T04:35:10,610 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:10,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:10,723 INFO [M:0;8fc3ff0a63e6:41745 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:10,723 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41745-0x10160d2fecc0000, quorum=127.0.0.1:54000, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:10,726 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@af33574{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:10,727 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17d00685{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:10,727 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:10,727 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:10,728 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1410bc86{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:10,730 WARN [BP-1113426834-172.17.0.2-1732250106449 heartbeating to localhost/127.0.0.1:38711 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:10,730 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:35:10,730 WARN [BP-1113426834-172.17.0.2-1732250106449 heartbeating to localhost/127.0.0.1:38711 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1113426834-172.17.0.2-1732250106449 (Datanode Uuid e2c426cd-f6e5-4600-9404-6b3abc1c3eda) service to localhost/127.0.0.1:38711 2024-11-22T04:35:10,730 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:10,730 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data3/current/BP-1113426834-172.17.0.2-1732250106449 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:10,731 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data4/current/BP-1113426834-172.17.0.2-1732250106449 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:10,731 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:10,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@36505daf{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:10,738 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21e00560{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:10,738 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:10,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:10,738 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@779c0b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:10,739 WARN [BP-1113426834-172.17.0.2-1732250106449 heartbeating to localhost/127.0.0.1:38711 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:10,739 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:35:10,739 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:10,739 WARN [BP-1113426834-172.17.0.2-1732250106449 heartbeating to localhost/127.0.0.1:38711 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1113426834-172.17.0.2-1732250106449 (Datanode Uuid c07a79a7-13db-4a9f-8536-4df37f5341af) service to localhost/127.0.0.1:38711 2024-11-22T04:35:10,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data1/current/BP-1113426834-172.17.0.2-1732250106449 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:10,740 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/cluster_3e1105ce-0f98-db11-e02e-a3ba3bbc4423/data/data2/current/BP-1113426834-172.17.0.2-1732250106449 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:10,741 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:10,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4e15a6d0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:35:10,746 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2152d149{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:10,746 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:10,746 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:10,746 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6b5fac92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:10,752 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.log.dir so I do NOT create it in target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75 2024-11-22T04:35:10,769 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f3c9c450-7d76-b682-aa80-9569d8078cc9/hadoop.tmp.dir so I do NOT create it in target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65, deleteOnExit=true 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:35:10,769 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/test.cache.data in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:35:10,770 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:35:10,770 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:35:10,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:35:10,783 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:35:10,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:10,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:11,204 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-22T04:35:11,206 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:11,220 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:11,222 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:11,222 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:11,244 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:11,249 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:11,250 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:11,251 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:11,251 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:11,251 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:11,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:11,252 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:11,345 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3feb978b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-40071-hadoop-hdfs-3_4_1-tests_jar-_-any-5253511736110289520/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:35:11,346 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:40071} 2024-11-22T04:35:11,346 INFO [Time-limited test {}] server.Server(415): Started @108612ms 2024-11-22T04:35:11,357 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:35:11,420 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:11,693 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:11,696 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:11,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:11,699 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:11,699 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:11,699 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4d2d9832{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:11,700 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f14f219{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:11,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@144f8866{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-38991-hadoop-hdfs-3_4_1-tests_jar-_-any-97405276091877395/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:11,793 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1e0a663b{HTTP/1.1, (http/1.1)}{localhost:38991} 2024-11-22T04:35:11,793 INFO [Time-limited test {}] server.Server(415): Started @109059ms 2024-11-22T04:35:11,794 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:11,821 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:11,825 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:11,826 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:11,826 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:11,826 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:35:11,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3574ce3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:11,827 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1baf7059{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:11,926 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1a9565fd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-34185-hadoop-hdfs-3_4_1-tests_jar-_-any-5362917682314606324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:11,927 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@115f1319{HTTP/1.1, (http/1.1)}{localhost:34185} 2024-11-22T04:35:11,927 INFO [Time-limited test {}] server.Server(415): Started @109193ms 2024-11-22T04:35:11,928 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:12,946 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data2/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:12,946 WARN [Thread-672 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data1/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:12,966 WARN [Thread-636 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe050e19208fca157 with lease ID 0xa7190061b2006ba8: Processing first storage report for DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875 from datanode DatanodeRegistration(127.0.0.1:40269, datanodeUuid=014235c3-84e3-4eb9-af71-a852bd8908e7, infoPort=40579, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe050e19208fca157 with lease ID 0xa7190061b2006ba8: from storage DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875 node DatanodeRegistration(127.0.0.1:40269, datanodeUuid=014235c3-84e3-4eb9-af71-a852bd8908e7, infoPort=40579, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe050e19208fca157 with lease ID 0xa7190061b2006ba8: Processing first storage report for DS-ddaa4601-3d27-4d86-ba0e-0f08b399f158 from datanode DatanodeRegistration(127.0.0.1:40269, datanodeUuid=014235c3-84e3-4eb9-af71-a852bd8908e7, infoPort=40579, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:12,969 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe050e19208fca157 with lease ID 0xa7190061b2006ba8: from storage DS-ddaa4601-3d27-4d86-ba0e-0f08b399f158 node DatanodeRegistration(127.0.0.1:40269, datanodeUuid=014235c3-84e3-4eb9-af71-a852bd8908e7, infoPort=40579, infoSecurePort=0, ipcPort=33685, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:13,081 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:13,081 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:13,105 WARN [Thread-659 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51ca5214c7ce12a7 with lease ID 0xa7190061b2006ba9: Processing first storage report for DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc from datanode DatanodeRegistration(127.0.0.1:43317, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=43935, infoSecurePort=0, ipcPort=40503, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51ca5214c7ce12a7 with lease ID 0xa7190061b2006ba9: from storage DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc node DatanodeRegistration(127.0.0.1:43317, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=43935, infoSecurePort=0, ipcPort=40503, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x51ca5214c7ce12a7 with lease ID 0xa7190061b2006ba9: Processing first storage report for DS-f22eae57-3539-45da-968f-18b89499b62a from datanode DatanodeRegistration(127.0.0.1:43317, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=43935, infoSecurePort=0, ipcPort=40503, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x51ca5214c7ce12a7 with lease ID 0xa7190061b2006ba9: from storage DS-f22eae57-3539-45da-968f-18b89499b62a node DatanodeRegistration(127.0.0.1:43317, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=43935, infoSecurePort=0, ipcPort=40503, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:13,169 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75 2024-11-22T04:35:13,172 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/zookeeper_0, clientPort=49807, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:35:13,173 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49807 2024-11-22T04:35:13,173 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-11-22T04:35:13,175 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:35:13,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:35:13,186 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0 with version=8 2024-11-22T04:35:13,187 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:35:13,188 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:35:13,189 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:35:13,190 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46195 2024-11-22T04:35:13,191 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46195 connecting to ZooKeeper ensemble=127.0.0.1:49807 2024-11-22T04:35:13,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:461950x0, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:35:13,248 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46195-0x10160d3103f0000 connected 2024-11-22T04:35:13,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,335 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,338 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:13,339 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0, hbase.cluster.distributed=false 2024-11-22T04:35:13,341 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:35:13,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46195 2024-11-22T04:35:13,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46195 2024-11-22T04:35:13,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46195 2024-11-22T04:35:13,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46195 2024-11-22T04:35:13,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46195 2024-11-22T04:35:13,358 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:35:13,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,358 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:35:13,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:13,358 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:35:13,359 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:35:13,359 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:35:13,359 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37839 2024-11-22T04:35:13,361 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37839 connecting to ZooKeeper ensemble=127.0.0.1:49807 
2024-11-22T04:35:13,362 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378390x0, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:35:13,374 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:13,374 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37839-0x10160d3103f0001 connected 2024-11-22T04:35:13,375 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:35:13,375 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:35:13,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:35:13,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:35:13,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-22T04:35:13,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37839 2024-11-22T04:35:13,381 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37839 2024-11-22T04:35:13,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-22T04:35:13,383 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-22T04:35:13,394 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:46195 2024-11-22T04:35:13,394 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:13,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:13,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:13,406 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): 
master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:13,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:35:13,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:13,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:13,416 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:35:13,417 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,46195,1732250113188 from backup master directory 2024-11-22T04:35:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:13,426 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-22T04:35:13,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:35:13,426 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:13,432 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/hbase.id] with ID: c6e7d8ae-1eb4-433d-bed4-1d6028d0f9b4 2024-11-22T04:35:13,432 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/.tmp/hbase.id 2024-11-22T04:35:13,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:35:13,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:35:13,443 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/.tmp/hbase.id]:[hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/hbase.id] 2024-11-22T04:35:13,456 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:13,456 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:35:13,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-22T04:35:13,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:13,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:13,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:35:13,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:35:13,476 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:35:13,477 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:35:13,477 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:35:13,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:35:13,890 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store 2024-11-22T04:35:13,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:35:13,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:35:13,902 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:13,902 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:35:13,902 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250113902Disabling compacts and flushes for region at 1732250113902Disabling writes for close at 1732250113902Writing region close event to WAL at 1732250113902Closed at 1732250113902 2024-11-22T04:35:13,903 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/.initializing 2024-11-22T04:35:13,904 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:13,907 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C46195%2C1732250113188, suffix=, logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/oldWALs, maxLogs=10 2024-11-22T04:35:13,908 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 2024-11-22T04:35:13,913 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 2024-11-22T04:35:13,915 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40579:40579),(127.0.0.1/127.0.0.1:43935:43935)] 2024-11-22T04:35:13,916 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:35:13,916 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:13,916 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,916 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,917 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,919 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:35:13,919 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:13,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:13,920 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:35:13,922 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:13,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:13,922 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,924 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:35:13,924 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:13,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:13,925 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,926 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:35:13,926 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:13,927 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:13,927 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,928 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,928 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,930 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,930 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,930 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:35:13,932 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:35:13,934 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:35:13,935 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=722980, jitterRate=-0.08068442344665527}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:35:13,936 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250113916Initializing all the Stores at 1732250113917 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250113917Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250113917Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250113917Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250113917Cleaning up temporary data from old regions at 1732250113930 (+13 ms)Region opened successfully at 1732250113936 (+6 ms) 2024-11-22T04:35:13,936 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:35:13,940 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7327c64d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:35:13,941 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:35:13,941 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:35:13,941 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:35:13,941 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:35:13,942 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:35:13,942 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:35:13,942 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:35:13,945 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:35:13,946 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:35:13,994 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:35:13,995 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:35:13,996 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:35:14,005 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:35:14,006 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:35:14,007 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:35:14,015 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:35:14,017 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:35:14,026 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:35:14,028 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:35:14,037 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:35:14,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:14,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:14,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,048 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,46195,1732250113188, sessionid=0x10160d3103f0000, setting cluster-up flag (Was=false) 2024-11-22T04:35:14,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,100 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:35:14,101 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:14,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,153 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:35:14,154 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:14,156 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:35:14,158 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:14,158 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:35:14,158 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:35:14,158 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,46195,1732250113188 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:35:14,160 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:14,160 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:14,160 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:14,160 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:35:14,160 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:35:14,161 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,161 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:35:14,161 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:35:14,161 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250144161 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,162 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:35:14,163 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:35:14,163 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:35:14,163 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:14,163 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:35:14,163 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:35:14,163 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:35:14,163 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250114163,5,FailOnTimeoutGroup] 2024-11-22T04:35:14,164 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250114163,5,FailOnTimeoutGroup] 2024-11-22T04:35:14,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:35:14,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,164 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,164 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:35:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:35:14,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:35:14,175 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:35:14,175 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0 2024-11-22T04:35:14,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:35:14,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:35:14,186 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(746): ClusterId : c6e7d8ae-1eb4-433d-bed4-1d6028d0f9b4 2024-11-22T04:35:14,186 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:35:14,186 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:14,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:35:14,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:35:14,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:35:14,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:35:14,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:35:14,195 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:35:14,195 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,195 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:35:14,195 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:35:14,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,196 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:35:14,198 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:35:14,198 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,199 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:35:14,200 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740 2024-11-22T04:35:14,200 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740 2024-11-22T04:35:14,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:35:14,201 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:35:14,202 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
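The FlushLargeStoresPolicy entries above describe a fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on a table, the per-column-family flush lower bound becomes the region's memstore flush size divided by its number of families. A minimal Java sketch of that arithmetic, using the four-family master store (flushSize=134217728 logged above) and assuming a 64 MB flush size for the four-family hbase:meta region, which would explain the "16.0 M" figure here and the flushSizeLowerBound=16777216 recorded when meta opens:

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // lowerBound = memstoreFlushSize / numberOfColumnFamilies (the fallback described in the log)
            long masterStoreFlushSize = 134_217_728L; // 128 MB, from the MasterRegionFlusherAndCompactor line above
            long metaFlushSize = 67_108_864L;         // 64 MB, assumed for hbase:meta in this test
            int families = 4;                         // info/proc/rs/state and info/ns/rep_barrier/table respectively
            System.out.println(masterStoreFlushSize / families); // 33554432 -> "32.0 M", flushSizeLowerBound=33554432
            System.out.println(metaFlushSize / families);        // 16777216 -> "16.0 M", flushSizeLowerBound=16777216
        }
    }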
2024-11-22T04:35:14,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:35:14,206 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:35:14,206 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f15e9e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:35:14,208 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:35:14,209 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=840873, jitterRate=0.0692259818315506}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:35:14,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250114186Initializing all the Stores at 1732250114187 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114187Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114187Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250114187Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114187Cleaning up temporary data from old regions at 1732250114202 (+15 ms)Region opened successfully at 1732250114210 (+8 ms) 2024-11-22T04:35:14,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:35:14,210 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:35:14,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:35:14,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 
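The "Opened ..." entries report a jittered split size: desiredMaxFileSize=722980 with jitterRate=-0.08068442344665527 for the master local region earlier in this log, and desiredMaxFileSize=840873 with jitterRate=0.0692259818315506 for hbase:meta just above. Both values are consistent with a base split size of 786432 bytes (768 KB; presumed to be the configured region max file size in this test, which is an assumption) adjusted by the truncated product base * jitterRate:

    public class SplitSizeJitterCheck {
        public static void main(String[] args) {
            long base = 786_432L; // assumed configured max file size (768 KB); inferred from the two values, not logged directly
            // jitterRate values copied from the log; the (long) cast truncates toward zero
            System.out.println(base + (long) (base * -0.08068442344665527)); // 722980, master local region
            System.out.println(base + (long) (base *  0.0692259818315506));  // 840873, hbase:meta
        }
    }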
2024-11-22T04:35:14,210 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:35:14,211 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:14,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250114210Disabling compacts and flushes for region at 1732250114210Disabling writes for close at 1732250114210Writing region close event to WAL at 1732250114211 (+1 ms)Closed at 1732250114211 2024-11-22T04:35:14,213 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:14,213 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:35:14,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:35:14,215 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:35:14,216 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:35:14,218 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:37839 2024-11-22T04:35:14,218 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:35:14,218 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:35:14,218 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(832): About to register with Master. 
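Once this assignment completes, the meta location is published under the ZooKeeper znode /hbase/meta-region-server (the NodeDataChanged events for that path appear further down). A hedged sketch of checking for that znode with the plain ZooKeeper client, using the quorum address shown in this log; the payload is protobuf-encoded, so it is only tested for existence here:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaLocationZnodeCheck {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:49807", 30_000, event -> { }); // quorum taken from the log
            Stat stat = zk.exists("/hbase/meta-region-server", false);
            System.out.println(stat == null ? "meta location not published yet"
                                            : "meta location published, znode version " + stat.getVersion());
            zk.close();
        }
    }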
2024-11-22T04:35:14,219 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,46195,1732250113188 with port=37839, startcode=1732250113358 2024-11-22T04:35:14,219 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:35:14,221 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55913, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:35:14,222 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46195 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,222 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46195 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,224 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0 2024-11-22T04:35:14,224 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39459 2024-11-22T04:35:14,224 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:35:14,237 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:14,237 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] zookeeper.ZKUtil(111): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,237 WARN [RS:0;8fc3ff0a63e6:37839 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:35:14,238 INFO [RS:0;8fc3ff0a63e6:37839 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:14,238 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,238 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,37839,1732250113358] 2024-11-22T04:35:14,242 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:35:14,246 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:35:14,246 INFO [RS:0;8fc3ff0a63e6:37839 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:35:14,247 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
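The MemStoreFlusher line above reports globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M. The low-water mark is consistent with a lower-limit fraction of 0.95 (the usual default for hbase.regionserver.global.memstore.size.lower.limit); a one-line check:

    public class MemStoreLowMarkCheck {
        public static void main(String[] args) {
            double globalLimitMB = 880;        // from the log
            double lowerLimitFraction = 0.95;  // assumed default lower-limit fraction
            System.out.println(globalLimitMB * lowerLimitFraction); // 836.0, matching globalMemStoreLimitLowMark=836 M
        }
    }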
2024-11-22T04:35:14,247 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:35:14,248 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:35:14,248 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,248 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,249 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,249 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,249 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:14,249 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:14,249 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:14,249 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
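Each "Starting executor service ... corePoolSize=N, maxPoolSize=N" line above corresponds to a dedicated, bounded thread pool for a single event type. HBase's ExecutorService wrapper is internal; a rough java.util.concurrent analogue of the single-threaded RS_OPEN_REGION pool (class and task names here are illustrative only):

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class OpenRegionPoolSketch {
        public static void main(String[] args) {
            // corePoolSize=1, maxPoolSize=1: one worker thread, queued open-region events run in order
            ThreadPoolExecutor pool = new ThreadPoolExecutor(
                1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
            pool.submit(() -> System.out.println("open-region handler would run here"));
            pool.shutdown();
        }
    }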
2024-11-22T04:35:14,249 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,249 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,250 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,250 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,250 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37839,1732250113358-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:35:14,295 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:35:14,295 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37839,1732250113358-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,295 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,296 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.Replication(171): 8fc3ff0a63e6,37839,1732250113358 started 2024-11-22T04:35:14,310 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:14,310 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,37839,1732250113358, RpcServer on 8fc3ff0a63e6/172.17.0.2:37839, sessionid=0x10160d3103f0001 2024-11-22T04:35:14,310 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:35:14,310 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,310 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37839,1732250113358' 2024-11-22T04:35:14,310 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:35:14,311 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37839,1732250113358' 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:35:14,312 DEBUG 
[RS:0;8fc3ff0a63e6:37839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:35:14,312 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:35:14,313 INFO [RS:0;8fc3ff0a63e6:37839 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:35:14,313 INFO [RS:0;8fc3ff0a63e6:37839 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:35:14,366 WARN [8fc3ff0a63e6:46195 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:35:14,415 INFO [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37839%2C1732250113358, suffix=, logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs, maxLogs=32 2024-11-22T04:35:14,416 INFO [RS:0;8fc3ff0a63e6:37839 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 2024-11-22T04:35:14,425 INFO [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 2024-11-22T04:35:14,426 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43935:43935),(127.0.0.1/127.0.0.1:40579:40579)] 2024-11-22T04:35:14,616 DEBUG [8fc3ff0a63e6:46195 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:35:14,617 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,619 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37839,1732250113358, state=OPENING 2024-11-22T04:35:14,679 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:35:14,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:14,692 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:35:14,692 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:14,692 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=8fc3ff0a63e6,37839,1732250113358}] 2024-11-22T04:35:14,692 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:14,848 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:35:14,853 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57597, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:35:14,860 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:35:14,861 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:14,864 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37839%2C1732250113358.meta, suffix=.meta, logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs, maxLogs=32 2024-11-22T04:35:14,864 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta 2024-11-22T04:35:14,873 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta 2024-11-22T04:35:14,877 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43935:43935),(127.0.0.1/127.0.0.1:40579:40579)] 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:35:14,881 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
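The coprocessor entries above show MultiRowMutationEndpoint being loaded from the hbase:meta table descriptor (the coprocessor$1 attribute with priority 536870911 written when the descriptor was created earlier). The meta descriptor itself is built internally, but the same mechanism is exposed through the client API; a hedged sketch for a hypothetical user table, where only the coprocessor class name comes from this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
        public static void main(String[] args) throws Exception {
            // "example" and its "info" family are illustrative placeholders
            TableDescriptor td = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
                .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .build();
            System.out.println(td);
        }
    }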
2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:35:14,881 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:35:14,884 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:35:14,885 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:35:14,885 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,886 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:35:14,887 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:35:14,887 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,887 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:35:14,888 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:35:14,888 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:35:14,889 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:35:14,889 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:35:14,889 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:14,890 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
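The store openers above create the meta column families with the attributes listed in the table descriptor (for 'info': three versions, ROW_INDEX_V1 block encoding, ROWCOL bloom filter, in-memory, 8 KB blocks). A hedged sketch of building an equivalent family descriptor with the public client API; it mirrors the logged attributes rather than reproducing how meta's descriptor is actually constructed:

    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaInfoFamilySketch {
        public static void main(String[] args) {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))   // family name from the descriptor above
                .setMaxVersions(3)
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                .setBloomFilterType(BloomType.ROWCOL)
                .setInMemory(true)
                .setBlocksize(8192)
                .build();
            System.out.println(info);
        }
    }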
2024-11-22T04:35:14,890 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:35:14,891 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740 2024-11-22T04:35:14,892 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740 2024-11-22T04:35:14,893 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:35:14,893 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:35:14,894 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T04:35:14,895 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:35:14,896 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875382, jitterRate=0.11310607194900513}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:35:14,896 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:35:14,896 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250114881Writing region info on filesystem at 1732250114881Initializing all the Stores at 1732250114882 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114882Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114884 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250114884Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250114884Cleaning up temporary data from old regions at 1732250114893 (+9 ms)Running coprocessor post-open hooks at 1732250114896 (+3 ms)Region opened successfully at 1732250114896 2024-11-22T04:35:14,897 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250114848 2024-11-22T04:35:14,900 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:35:14,900 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:35:14,901 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:14,902 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37839,1732250113358, state=OPEN 2024-11-22T04:35:15,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:35:15,034 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:35:15,034 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:15,034 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:15,034 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:35:15,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:35:15,040 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37839,1732250113358 in 342 msec 2024-11-22T04:35:15,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:35:15,044 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 827 msec 2024-11-22T04:35:15,045 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:35:15,045 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:35:15,047 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:35:15,047 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37839,1732250113358, seqNum=-1] 2024-11-22T04:35:15,047 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:35:15,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35619, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:35:15,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 896 msec 2024-11-22T04:35:15,054 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250115054, completionTime=-1 2024-11-22T04:35:15,054 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:35:15,054 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250175057 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250235057 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:46195, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:15,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,059 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:35:15,061 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.635sec 2024-11-22T04:35:15,061 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:35:15,061 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:35:15,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:35:15,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T04:35:15,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:35:15,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:35:15,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:35:15,064 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:35:15,064 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:35:15,064 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,46195,1732250113188-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:15,086 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44b90fe6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:15,086 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,46195,-1 for getting cluster id 2024-11-22T04:35:15,087 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:35:15,089 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c6e7d8ae-1eb4-433d-bed4-1d6028d0f9b4' 2024-11-22T04:35:15,089 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:35:15,090 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c6e7d8ae-1eb4-433d-bed4-1d6028d0f9b4" 2024-11-22T04:35:15,090 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@e9d99f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:15,090 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,46195,-1] 2024-11-22T04:35:15,090 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:35:15,091 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:15,093 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37246, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:35:15,094 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@58f0ef15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:35:15,094 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:35:15,096 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37839,1732250113358, seqNum=-1] 2024-11-22T04:35:15,096 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:35:15,098 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60230, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:35:15,100 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:15,100 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:15,103 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:35:15,127 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:35:15,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:15,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:15,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:35:15,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:35:15,127 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:35:15,128 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:35:15,128 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:35:15,128 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38269 2024-11-22T04:35:15,130 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:38269 connecting to ZooKeeper ensemble=127.0.0.1:49807 2024-11-22T04:35:15,130 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:15,132 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:35:15,153 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:382690x0, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:35:15,153 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:382690x0, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-22T04:35:15,153 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:38269-0x10160d3103f0002 connected 2024-11-22T04:35:15,153 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-22T04:35:15,154 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:35:15,155 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:35:15,156 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:35:15,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:35:15,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38269 2024-11-22T04:35:15,160 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38269 2024-11-22T04:35:15,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38269 2024-11-22T04:35:15,161 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38269 2024-11-22T04:35:15,164 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38269 2024-11-22T04:35:15,166 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(746): ClusterId : c6e7d8ae-1eb4-433d-bed4-1d6028d0f9b4 2024-11-22T04:35:15,166 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:35:15,174 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:35:15,175 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:35:15,185 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:35:15,185 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@31bec1e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:35:15,196 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;8fc3ff0a63e6:38269 2024-11-22T04:35:15,196 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:35:15,196 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:35:15,196 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T04:35:15,197 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,46195,1732250113188 with port=38269, startcode=1732250115127 2024-11-22T04:35:15,197 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:35:15,198 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40301, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:35:15,199 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46195 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,199 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46195 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,200 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0 2024-11-22T04:35:15,200 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:39459 2024-11-22T04:35:15,200 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:35:15,212 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:15,213 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] zookeeper.ZKUtil(111): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,213 WARN [RS:1;8fc3ff0a63e6:38269 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:35:15,213 INFO [RS:1;8fc3ff0a63e6:38269 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:35:15,213 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,213 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,38269,1732250115127] 2024-11-22T04:35:15,217 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:35:15,219 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:35:15,220 INFO [RS:1;8fc3ff0a63e6:38269 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:35:15,220 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:15,220 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:35:15,221 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:35:15,222 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,222 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:15,223 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,224 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,38269,1732250115127-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:35:15,240 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:35:15,240 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,38269,1732250115127-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,240 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,240 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.Replication(171): 8fc3ff0a63e6,38269,1732250115127 started 2024-11-22T04:35:15,253 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:35:15,253 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,38269,1732250115127, RpcServer on 8fc3ff0a63e6/172.17.0.2:38269, sessionid=0x10160d3103f0002 2024-11-22T04:35:15,253 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:35:15,253 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,253 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;8fc3ff0a63e6:38269,5,FailOnTimeoutGroup] 2024-11-22T04:35:15,253 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,38269,1732250115127' 2024-11-22T04:35:15,253 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:35:15,254 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-22T04:35:15,254 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T04:35:15,254 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,38269,1732250115127' 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:35:15,255 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:35:15,255 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@57a891e5 2024-11-22T04:35:15,255 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T04:35:15,255 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:35:15,255 INFO [RS:1;8fc3ff0a63e6:38269 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:35:15,255 INFO [RS:1;8fc3ff0a63e6:38269 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:35:15,257 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37248, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T04:35:15,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T04:35:15,257 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-22T04:35:15,258 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:35:15,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T04:35:15,260 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T04:35:15,260 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:15,260 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-22T04:35:15,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:35:15,261 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T04:35:15,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741835_1011 (size=393) 2024-11-22T04:35:15,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741835_1011 (size=393) 2024-11-22T04:35:15,269 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a35ad475da12a258dad9be3d792a837c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0 2024-11-22T04:35:15,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43317 is added to blk_1073741836_1012 (size=76) 2024-11-22T04:35:15,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40269 is added to blk_1073741836_1012 (size=76) 2024-11-22T04:35:15,278 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:15,278 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a35ad475da12a258dad9be3d792a837c, disabling compactions & flushes 2024-11-22T04:35:15,278 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,278 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,278 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. after waiting 0 ms 2024-11-22T04:35:15,278 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,278 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,278 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a35ad475da12a258dad9be3d792a837c: Waiting for close lock at 1732250115278Disabling compacts and flushes for region at 1732250115278Disabling writes for close at 1732250115278Writing region close event to WAL at 1732250115278Closed at 1732250115278 2024-11-22T04:35:15,280 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T04:35:15,280 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732250115280"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250115280"}]},"ts":"1732250115280"} 2024-11-22T04:35:15,283 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T04:35:15,284 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T04:35:15,285 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250115285"}]},"ts":"1732250115285"} 2024-11-22T04:35:15,287 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-22T04:35:15,288 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a35ad475da12a258dad9be3d792a837c, ASSIGN}] 2024-11-22T04:35:15,289 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a35ad475da12a258dad9be3d792a837c, ASSIGN 2024-11-22T04:35:15,291 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a35ad475da12a258dad9be3d792a837c, ASSIGN; state=OFFLINE, location=8fc3ff0a63e6,37839,1732250113358; forceNewPlan=false, retain=false 2024-11-22T04:35:15,358 INFO [RS:1;8fc3ff0a63e6:38269 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C38269%2C1732250115127, suffix=, logDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127, archiveDir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs, maxLogs=32 2024-11-22T04:35:15,358 INFO [RS:1;8fc3ff0a63e6:38269 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 2024-11-22T04:35:15,364 INFO [RS:1;8fc3ff0a63e6:38269 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 2024-11-22T04:35:15,366 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40579:40579),(127.0.0.1/127.0.0.1:43935:43935)] 2024-11-22T04:35:15,441 INFO [8fc3ff0a63e6:46195 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-22T04:35:15,442 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a35ad475da12a258dad9be3d792a837c, regionState=OPENING, regionLocation=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:15,446 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a35ad475da12a258dad9be3d792a837c, ASSIGN because future has completed 2024-11-22T04:35:15,447 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a35ad475da12a258dad9be3d792a837c, server=8fc3ff0a63e6,37839,1732250113358}] 2024-11-22T04:35:15,611 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,611 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a35ad475da12a258dad9be3d792a837c, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:35:15,612 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,612 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:35:15,612 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,612 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,614 INFO [StoreOpener-a35ad475da12a258dad9be3d792a837c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,615 INFO [StoreOpener-a35ad475da12a258dad9be3d792a837c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a35ad475da12a258dad9be3d792a837c columnFamilyName info 2024-11-22T04:35:15,615 DEBUG [StoreOpener-a35ad475da12a258dad9be3d792a837c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:35:15,616 INFO [StoreOpener-a35ad475da12a258dad9be3d792a837c-1 {}] regionserver.HStore(327): Store=a35ad475da12a258dad9be3d792a837c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:35:15,616 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,617 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,617 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,618 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,618 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,620 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,622 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:35:15,623 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a35ad475da12a258dad9be3d792a837c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709147, jitterRate=-0.09827306866645813}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:35:15,623 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:15,624 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a35ad475da12a258dad9be3d792a837c: Running coprocessor pre-open hook at 1732250115612Writing region info on filesystem at 1732250115612Initializing all the Stores at 1732250115613 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250115613Cleaning up temporary data from old regions at 1732250115618 (+5 ms)Running coprocessor post-open hooks at 1732250115623 (+5 ms)Region opened successfully at 1732250115624 (+1 ms) 2024-11-22T04:35:15,625 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., pid=6, masterSystemTime=1732250115603 2024-11-22T04:35:15,627 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,627 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:15,628 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a35ad475da12a258dad9be3d792a837c, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:15,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a35ad475da12a258dad9be3d792a837c, server=8fc3ff0a63e6,37839,1732250113358 because future has completed 2024-11-22T04:35:15,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T04:35:15,634 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a35ad475da12a258dad9be3d792a837c, server=8fc3ff0a63e6,37839,1732250113358 in 185 msec 2024-11-22T04:35:15,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T04:35:15,637 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a35ad475da12a258dad9be3d792a837c, ASSIGN in 346 msec 2024-11-22T04:35:15,638 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T04:35:15,638 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250115638"}]},"ts":"1732250115638"} 2024-11-22T04:35:15,640 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-22T04:35:15,641 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T04:35:15,643 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 383 msec 2024-11-22T04:35:20,387 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:35:20,392 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:20,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:20,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:20,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:20,419 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:35:20,419 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T04:35:20,420 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T04:35:20,420 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-22T04:35:20,420 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-22T04:35:25,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46195 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:35:25,360 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-22T04:35:25,360 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-22T04:35:25,370 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T04:35:25,370 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:25,381 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:25,384 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:25,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:25,384 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:25,384 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:35:25,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@d929f15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:25,385 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@698a5081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:25,479 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@624ef820{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-39349-hadoop-hdfs-3_4_1-tests_jar-_-any-6359128931165735674/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:25,480 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@12827689{HTTP/1.1, (http/1.1)}{localhost:39349} 2024-11-22T04:35:25,480 INFO [Time-limited test {}] server.Server(415): Started @122746ms 2024-11-22T04:35:25,481 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:25,511 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:25,514 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:25,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:25,515 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:25,515 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:25,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78773b90{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:25,516 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@99a03ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:25,607 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5fb4bc9e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-43309-hadoop-hdfs-3_4_1-tests_jar-_-any-13782574708002445360/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:25,608 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69248046{HTTP/1.1, (http/1.1)}{localhost:43309} 2024-11-22T04:35:25,608 INFO [Time-limited test {}] server.Server(415): Started @122874ms 2024-11-22T04:35:25,609 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:25,640 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:25,643 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:25,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:25,644 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:25,644 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:35:25,644 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28f4e296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:25,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2e234cf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:25,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1191c470{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-39183-hadoop-hdfs-3_4_1-tests_jar-_-any-9522144512050485225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:25,739 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1944332e{HTTP/1.1, (http/1.1)}{localhost:39183} 2024-11-22T04:35:25,739 INFO [Time-limited test {}] server.Server(415): Started @123006ms 2024-11-22T04:35:25,740 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:27,141 WARN [Thread-868 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,141 WARN [Thread-867 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,161 WARN [Thread-808 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd75d81f7c4f00cc0 with lease ID 0xa7190061b2006baa: Processing first storage report for DS-6f380372-c9f9-4f4b-966e-64c38a3051b4 from datanode DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd75d81f7c4f00cc0 with lease ID 0xa7190061b2006baa: from storage DS-6f380372-c9f9-4f4b-966e-64c38a3051b4 node DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd75d81f7c4f00cc0 with lease ID 0xa7190061b2006baa: Processing first storage report for DS-fa55e751-0269-448d-a929-c75a0c9ffda1 from datanode DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,164 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd75d81f7c4f00cc0 with lease ID 0xa7190061b2006baa: from storage DS-fa55e751-0269-448d-a929-c75a0c9ffda1 node DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,440 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data7/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,440 WARN [Thread-879 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data8/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,461 WARN [Thread-830 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:27,463 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a4382e292cc36bf with lease ID 0xa7190061b2006bab: Processing first storage report for DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2 from datanode DatanodeRegistration(127.0.0.1:38827, datanodeUuid=6a922c9f-1837-4f54-a9f6-c99effa898d1, infoPort=35733, infoSecurePort=0, ipcPort=38087, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a4382e292cc36bf with lease ID 0xa7190061b2006bab: from storage DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2 node DatanodeRegistration(127.0.0.1:38827, datanodeUuid=6a922c9f-1837-4f54-a9f6-c99effa898d1, infoPort=35733, infoSecurePort=0, ipcPort=38087, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,463 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x3a4382e292cc36bf with lease ID 0xa7190061b2006bab: Processing first storage report for DS-00810a4f-a9af-41bf-96bc-ed3d4f9add4b from datanode DatanodeRegistration(127.0.0.1:38827, datanodeUuid=6a922c9f-1837-4f54-a9f6-c99effa898d1, infoPort=35733, infoSecurePort=0, ipcPort=38087, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,463 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x3a4382e292cc36bf with lease ID 0xa7190061b2006bab: from storage DS-00810a4f-a9af-41bf-96bc-ed3d4f9add4b node DatanodeRegistration(127.0.0.1:38827, datanodeUuid=6a922c9f-1837-4f54-a9f6-c99effa898d1, infoPort=35733, infoSecurePort=0, ipcPort=38087, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,487 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data9/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,487 WARN [Thread-890 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data10/current/BP-2085118315-172.17.0.2-1732250110796/current, will proceed with Du for space computation calculation, 2024-11-22T04:35:27,510 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:35:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6beede3c92e9a90d with lease ID 0xa7190061b2006bac: Processing first storage report for DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3 from datanode DatanodeRegistration(127.0.0.1:35675, datanodeUuid=14d20cda-2ccf-44b6-9a52-f1eee118f485, infoPort=39147, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6beede3c92e9a90d with lease ID 0xa7190061b2006bac: from storage DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3 node DatanodeRegistration(127.0.0.1:35675, datanodeUuid=14d20cda-2ccf-44b6-9a52-f1eee118f485, infoPort=39147, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6beede3c92e9a90d with lease ID 0xa7190061b2006bac: Processing first storage report for DS-59920ab4-c4a1-433a-ae53-5e300df20933 from datanode DatanodeRegistration(127.0.0.1:35675, datanodeUuid=14d20cda-2ccf-44b6-9a52-f1eee118f485, infoPort=39147, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796) 2024-11-22T04:35:27,512 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6beede3c92e9a90d with lease ID 0xa7190061b2006bac: from storage DS-59920ab4-c4a1-433a-ae53-5e300df20933 node DatanodeRegistration(127.0.0.1:35675, datanodeUuid=14d20cda-2ccf-44b6-9a52-f1eee118f485, infoPort=39147, infoSecurePort=0, ipcPort=38931, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:27,579 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,579 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
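The registrations and first block reports above come from three additional datanodes (the data5 through data10 volumes) joining the mini DFS cluster just before the open WAL pipelines start failing in the entries that follow. A minimal sketch of that setup step, assuming the test harness exposes the underlying MiniDFSCluster; the helper name addSpareDataNodes is invented for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    // Illustrative only: grow the mini cluster so later pipeline recovery has spare datanodes.
    static void addSpareDataNodes(MiniDFSCluster dfsCluster, Configuration conf) throws Exception {
      // Start three more datanodes; each gets two data directories, matching data5..data10 above.
      dfsCluster.startDataNodes(conf, 3, true, null, null);
      // Block until the NameNode has processed their registrations and first block reports.
      dfsCluster.waitActive();
    }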
2024-11-22T04:35:27,579 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,580 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta block BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:27,580 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:27,580 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:27,580 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
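The "Error Recovery ... datanode 0 ... is bad" warnings are the client side of the failure: each open WAL file has a replicated write pipeline, and the DataStreamer marks the unreachable datanode (127.0.0.1:43317) as bad before trying to rebuild the pipeline around it. The WAL writers notice almost immediately because every append is followed by an hflush(); a generic, hedged illustration of that write pattern follows (the path handling and method name are invented, not HBase's actual WAL code):

    import java.io.IOException;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Sketch of a WAL-style stream: a long-lived file that is hflush()-ed on every append,
    // so a broken pipeline surfaces as an IOException on the next flush rather than at close().
    static void appendAndFlush(FileSystem fs, Path walPath, byte[][] edits) throws IOException {
      FSDataOutputStream out = fs.create(walPath, /* overwrite = */ false);
      try {
        for (byte[] edit : edits) {
          out.write(edit);
          out.hflush();   // waits for acknowledgements from the datanodes still in the pipeline
        }
      } finally {
        out.close();      // may itself fail once no healthy datanode remains
      }
    }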
2024-11-22T04:35:27,581 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:27,580 WARN [PacketResponder: BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43317] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:48230 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:43317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48230 dst: /127.0.0.1:43317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:35574 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35574 dst: /127.0.0.1:40269 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:27,581 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:48222 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:43317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48222 dst: /127.0.0.1:43317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,582 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:35564 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35564 dst: /127.0.0.1:40269 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,582 WARN [PacketResponder: BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43317] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,583 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:48182 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:43317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48182 dst: /127.0.0.1:43317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,583 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1341494519_22 at /127.0.0.1:35586 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35586 dst: /127.0.0.1:40269 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:27,584 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a9565fd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:27,584 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:35528 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40269:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35528 dst: /127.0.0.1:40269 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,584 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1341494519_22 at /127.0.0.1:48256 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:43317:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48256 dst: /127.0.0.1:43317 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@115f1319{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:27,585 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:27,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1baf7059{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:27,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3574ce3f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:27,589 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:27,589 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
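The "Stopped ... {datanode,...}" handlers, the interrupted IncrementalBlockReportManager and the exiting command processor above are a datanode being shut down on purpose; breaking the live WAL pipelines by killing one of their datanodes is the scenario this test exercises. A hedged sketch of how a mini-cluster test typically does this (variable and method names are invented):

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;

    // Illustrative only: stop one datanode so every open write pipeline that includes it breaks.
    static DataNodeProperties stopOneDataNode(MiniDFSCluster dfsCluster) {
      // stopDataNode(0) shuts the first datanode down; the returned properties could later be
      // handed to restartDataNode(...) if the scenario needs that node back.
      return dfsCluster.stopDataNode(0);
    }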
2024-11-22T04:35:27,589 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid af0b0e36-61ae-483f-a85d-ba85a4dacb3f) service to localhost/127.0.0.1:39459 2024-11-22T04:35:27,589 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:27,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:27,590 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:27,590 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:27,591 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:27,591 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,591 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta block BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,592 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@30d47bef {}] datanode.DataXceiver(331): 127.0.0.1:40269:DataXceiver error processing unknown operation src: /127.0.0.1:50850 dst: /127.0.0.1:40269 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:27,593 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:27,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@144f8866{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:27,607 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1e0a663b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:27,607 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:27,607 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f14f219{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:27,608 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4d2d9832{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:27,608 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:35:27,608 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:27,608 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:27,608 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid 014235c3-84e3-4eb9-af71-a852bd8908e7) service to localhost/127.0.0.1:39459 2024-11-22T04:35:27,609 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data1/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:27,609 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data2/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:27,609 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:27,615 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., hostname=8fc3ff0a63e6,37839,1732250113358, seqNum=2] 2024-11-22T04:35:27,617 ERROR [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0-prefix:8fc3ff0a63e6,37839,1732250113358 {}] wal.AbstractFSWAL(1838): 
appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,617 WARN [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0-prefix:8fc3ff0a63e6,37839,1732250113358 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,617 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
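The "All datanodes [...] are bad. Aborting..." failures mean pipeline recovery ran out of healthy nodes for these WAL blocks: after excluding the killed datanode the pipeline was down to a single replica, that attempt also failed, and appendAndSync surfaces the IOException to the WAL. How aggressively the DFS client tries to pull replacement datanodes into a recovering pipeline is controlled by the replace-datanode-on-failure client settings; a hedged sketch of setting them in a test configuration (the keys are real HDFS client keys, the chosen values are just one possibility, not necessarily what this run used):

    import org.apache.hadoop.conf.Configuration;

    // Sketch: client-side pipeline-recovery knobs.
    static Configuration pipelineRecoveryConf() {
      Configuration conf = new Configuration();
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
      conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
      // Keep writing on the remaining datanodes instead of aborting when no replacement is found.
      conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
      return conf;
    }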
2024-11-22T04:35:27,617 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358:(num 1732250114416) roll requested 2024-11-22T04:35:27,617 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 2024-11-22T04:35:27,623 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:27,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:27,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:27,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:27,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:27,624 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 2024-11-22T04:35:27,624 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:27,624 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:27,625 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39147:39147),(127.0.0.1/127.0.0.1:35733:35733)] 2024-11-22T04:35:27,625 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:27,625 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-22T04:35:27,626 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-22T04:35:27,626 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 2024-11-22T04:35:27,628 WARN [IPC Server handler 2 on default port 39459 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-22T04:35:27,632 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 after 4ms 2024-11-22T04:35:29,225 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:29,478 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
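[Editor's note] The RecoverLeaseFSUtils entries above show the Close-WAL-Writer worker repeatedly asking the NameNode to recover the lease on the old WAL file (attempt=0 after 4ms, attempt=1 roughly four seconds later). A minimal sketch of that retry loop, assuming a DistributedFileSystem handle and an illustrative 4-second backoff rather than HBase's own pause/timeout logic:

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      public static void recoverLease(DistributedFileSystem dfs, Path walFile)
          throws Exception {
        boolean recovered = false;
        while (!recovered) {
          // recoverLease() returns true once the lease is released and the file is closed;
          // while recovery is still in progress it returns false, matching the
          // "Lease recovery is in progress" / "Failed to recover lease, attempt=N" entries.
          recovered = dfs.recoverLease(walFile);
          if (!recovered) {
            Thread.sleep(4000L); // assumed backoff interval, for illustration only
          }
        }
      }
    }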
2024-11-22T04:35:29,625 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:29,627 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 2024-11-22T04:35:29,627 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:29,628 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:29,628 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:37330 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:35675:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37330 dst: /127.0.0.1:35675 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:29,628 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:34280 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:38827:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34280 dst: /127.0.0.1:38827 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:29,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1191c470{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:29,668 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1944332e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:29,668 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:29,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2e234cf7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:29,668 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28f4e296{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:29,670 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:29,670 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:35:29,670 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid 14d20cda-2ccf-44b6-9a52-f1eee118f485) service to localhost/127.0.0.1:39459 2024-11-22T04:35:29,670 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:29,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data9/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:29,670 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data10/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:29,671 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:31,225 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:31,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:31,626 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]] 2024-11-22T04:35:31,626 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:31,626 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358:(num 1732250127617) roll requested 2024-11-22T04:35:31,627 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 2024-11-22T04:35:31,633 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 after 4007ms 2024-11-22T04:35:31,634 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:31,634 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:31,634 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741839_1021 2024-11-22T04:35:31,637 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:31,640 WARN [Thread-910 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:31,640 WARN [Thread-910 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 
2024-11-22T04:35:31,640 WARN [Thread-910 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741840_1022 2024-11-22T04:35:31,641 WARN [Thread-910 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:31,646 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:31,646 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:31,646 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:31,647 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:31,647 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:31,647 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 2024-11-22T04:35:31,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38827 is added to blk_1073741838_1020 (size=2431) 2024-11-22T04:35:31,656 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44795:44795),(127.0.0.1/127.0.0.1:35733:35733)] 2024-11-22T04:35:31,656 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:31,656 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 is not closed yet, will try archiving it next time 2024-11-22T04:35:31,676 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T04:35:32,055 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:33,225 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
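[Editor's note] The roll performed by the log roller above (old WAL kept for archiving, new writer opened on a fresh pipeline) can also be requested from client code. A minimal sketch using the public Admin API follows; the server name is the one appearing in these entries, and a running cluster and configuration are assumed (in this test the roller triggers the roll itself when the pipeline degrades):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // ServerName.valueOf takes "host,port,startcode"
          ServerName rs = ServerName.valueOf("8fc3ff0a63e6,37839,1732250113358");
          // Ask the regionserver to close its current WAL and open a new one.
          admin.rollWALWriter(rs);
        }
      }
    }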
2024-11-22T04:35:33,476 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4cd5acf1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:38827, datanodeUuid=6a922c9f-1837-4f54-a9f6-c99effa898d1, infoPort=35733, infoSecurePort=0, ipcPort=38087, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741838_1020 to 127.0.0.1:43317 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,657 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,680 WARN [ResponseProcessor for block BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:33,681 WARN [DataStreamer for file /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 block BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:33,681 WARN [PacketResponder: BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:38827] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,681 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:51946 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51946 dst: /127.0.0.1:39493 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,681 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:34288 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:38827:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34288 dst: /127.0.0.1:38827 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:33,736 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5fb4bc9e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:33,736 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69248046{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:35:33,736 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:35:33,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@99a03ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:35:33,737 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78773b90{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:35:33,738 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:35:33,738 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:35:33,738 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid 6a922c9f-1837-4f54-a9f6-c99effa898d1) service to localhost/127.0.0.1:39459 2024-11-22T04:35:33,738 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:35:33,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data7/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:33,739 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data8/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:35:33,739 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:35:33,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:33,752 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:35:33,774 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/cd54c7c18842474fb8750331be1c9494 is 1080, key is row0002/info:/1732250129672/Put/seqid=0 2024-11-22T04:35:33,777 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,777 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:33,777 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741842_1025 2024-11-22T04:35:33,778 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:33,780 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,780 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 
2024-11-22T04:35:33,780 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741843_1026 2024-11-22T04:35:33,781 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:33,783 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35675 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,783 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41358 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027 to mirror 127.0.0.1:35675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,784 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 
2024-11-22T04:35:33,784 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41358 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:33,784 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027 2024-11-22T04:35:33,784 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41358 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41358 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,785 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:33,788 WARN [Thread-920 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:33,788 WARN [Thread-920 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 
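[Editor's note] The repeated "Error Recovery ... datanode N is bad" / "Excluding datanode" entries are the HDFS client rebuilding its write pipeline after datanodes disappear. A minimal sketch of the client-side settings that govern this behaviour, with illustrative values (these are the standard dfs.client.block.write.replace-datanode-on-failure.* keys, not settings taken from this test run):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfSketch {
      public static Configuration clientConf() {
        Configuration conf = new Configuration();
        // Ask the client to replace a failed datanode in an open write pipeline.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        // DEFAULT only replaces when the pipeline would otherwise become too small;
        // ALWAYS and NEVER are the other policies.
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // With best-effort, keep writing on the remaining datanodes if no replacement
        // can be found, instead of failing the stream outright.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }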
2024-11-22T04:35:33,787 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41370 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,788 WARN [Thread-920 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028 2024-11-22T04:35:33,788 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41370 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:33,788 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41370 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41370 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:33,789 WARN [Thread-920 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:33,790 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:33,790 WARN [IPC Server handler 3 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:33,790 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:33,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741846_1029 (size=10347) 2024-11-22T04:35:34,195 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/cd54c7c18842474fb8750331be1c9494 2024-11-22T04:35:34,209 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/cd54c7c18842474fb8750331be1c9494 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494 2024-11-22T04:35:34,216 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494, entries=5, sequenceid=11, filesize=10.1 K 2024-11-22T04:35:34,217 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a35ad475da12a258dad9be3d792a837c in 465ms, sequenceid=11, compaction requested=false 2024-11-22T04:35:34,217 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal 
for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:34,388 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:34,388 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-22T04:35:34,394 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e94a0027676d47f59e097ced938bada7 is 1080, key is row0007/info:/1732250133754/Put/seqid=0 2024-11-22T04:35:34,396 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:34,396 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:34,396 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741847_1030 2024-11-22T04:35:34,397 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:34,399 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:34,399 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:34,399 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741848_1031 2024-11-22T04:35:34,402 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:34,404 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:34,405 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:34,405 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741849_1032 2024-11-22T04:35:34,406 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:34,407 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:34,408 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:34,408 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741850_1033 2024-11-22T04:35:34,408 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:34,409 WARN [IPC Server handler 2 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:34,409 WARN [IPC Server handler 2 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:34,410 WARN [IPC Server handler 2 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:34,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741851_1034 (size=12506) 2024-11-22T04:35:34,422 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e94a0027676d47f59e097ced938bada7 2024-11-22T04:35:34,430 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e94a0027676d47f59e097ced938bada7 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7 2024-11-22T04:35:34,438 INFO [MemStoreFlusher.0 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7, entries=7, sequenceid=24, filesize=12.2 K 2024-11-22T04:35:34,439 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=0 B/0 for a35ad475da12a258dad9be3d792a837c in 51ms, sequenceid=24, compaction requested=false 2024-11-22T04:35:34,440 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:34,440 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-22T04:35:34,440 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:34,440 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7 because midkey is the same as first or last row 2024-11-22T04:35:35,226 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,479 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,657 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]] 2024-11-22T04:35:35,657 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,657 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358:(num 1732250131627) roll requested 2024-11-22T04:35:35,658 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 2024-11-22T04:35:35,662 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40269 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41426 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035 to mirror 127.0.0.1:40269 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:35,662 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:35,662 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41426 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:35,662 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035 2024-11-22T04:35:35,662 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41426 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41426 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:35,664 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:35,667 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35675 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41438 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036 to mirror 127.0.0.1:35675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:35,667 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:35,667 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036 2024-11-22T04:35:35,667 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41438 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:35,667 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41438 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41438 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:35,668 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:35,669 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,669 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:35,669 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741854_1037 2024-11-22T04:35:35,670 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:35,671 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,672 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:35,672 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741855_1038 2024-11-22T04:35:35,672 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:35,673 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:35,673 WARN [IPC Server handler 3 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:35,673 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:35,676 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:35,676 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:35,676 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:35,676 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:35,676 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:35,677 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 2024-11-22T04:35:35,678 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-22T04:35:35,678 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:35,678 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 is not closed yet, will try archiving it next time 2024-11-22T04:35:35,678 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C37839%2C1732250113358.1732250127617 2024-11-22T04:35:35,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741841_1024 (size=25992) 2024-11-22T04:35:35,679 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:35,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:35,809 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T04:35:35,814 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/63fd947ea4c449028775699f94927477 is 1079, key is tmprow/info:/1732250135808/Put/seqid=0 2024-11-22T04:35:35,816 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
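The roll sequence logged just above (pipeline error detected, "Requesting close of WAL", then "Rolled WAL ... new WAL ..." on a single-datanode pipeline) comes down to a replica-count check against a configured minimum. The sketch below is a minimal, self-contained illustration of that rule only; the class and method names are invented for the example and are not the HBase FSHLog implementation.

```java
// Simplified model of the low-replication check behind the
// "Found 1 replicas but expecting no less than 2 replicas" message above.
// All names here are hypothetical, not HBase APIs.
import java.util.List;

public class LowReplicationRollCheck {

    /** Minimum number of datanodes tolerated in the WAL write pipeline. */
    private final int minReplicas;

    public LowReplicationRollCheck(int minReplicas) {
        this.minReplicas = minReplicas;
    }

    /**
     * True when the current pipeline has fewer replicas than required,
     * i.e. the condition under which the log roller is asked to close the
     * current WAL file and open a new one on a healthier pipeline.
     */
    public boolean shouldRequestRoll(List<String> currentPipeline) {
        return currentPipeline.size() < minReplicas;
    }

    public static void main(String[] args) {
        LowReplicationRollCheck check = new LowReplicationRollCheck(2);
        // Mirrors the log: only one datanode left in the pipeline.
        List<String> pipeline = List.of("127.0.0.1:39493");
        System.out.println("request roll = " + check.shouldRequestRoll(pipeline)); // true
    }
}
```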
2024-11-22T04:35:35,816 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:35,816 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741857_1040 2024-11-22T04:35:35,817 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:35,818 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,818 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:35,818 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741858_1041 2024-11-22T04:35:35,819 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:35,820 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,821 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:35,821 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741859_1042 2024-11-22T04:35:35,821 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:35,823 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:35,823 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 
2024-11-22T04:35:35,823 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741860_1043 2024-11-22T04:35:35,824 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:35,825 WARN [IPC Server handler 0 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:35,825 WARN [IPC Server handler 0 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:35,825 WARN [IPC Server handler 0 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:35,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741861_1044 (size=6027) 2024-11-22T04:35:36,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741851_1034 to 127.0.0.1:43317 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
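The repeated "Abandoning blk_... / Excluding datanode ..." entries above show the HDFS client dropping each unreachable datanode and asking the NameNode for a fresh block until it either succeeds or runs out of candidates (at which point the "Failed to place enough replicas" warnings appear). The sketch below models only that abandon-and-exclude loop; every type and method is a hypothetical stand-in, and the real logic in org.apache.hadoop.hdfs.DataStreamer is considerably more involved.

```java
// Minimal model of the abandon-and-exclude retry loop visible in the log.
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class PipelineRetrySketch {

    interface BlockAllocator {
        /** Ask the "namenode" for a pipeline that avoids the excluded nodes. */
        List<String> allocatePipeline(Set<String> excludedNodes);
    }

    interface NodeProbe {
        /** Stands in for opening the block output stream to the first datanode. */
        boolean isReachable(String datanode);
    }

    /**
     * Tries to build a write pipeline, excluding each datanode that refuses the
     * connection, until a pipeline succeeds or no candidates are left.
     */
    static List<String> setupPipeline(BlockAllocator allocator, NodeProbe probe) {
        Set<String> excluded = new HashSet<>();
        while (true) {
            List<String> pipeline = allocator.allocatePipeline(excluded);
            if (pipeline.isEmpty()) {
                throw new IllegalStateException("no datanodes left to try");
            }
            String first = pipeline.get(0);
            if (probe.isReachable(first)) {
                return pipeline;                 // pipeline established
            }
            excluded.add(first);                 // "Excluding datanode ..."
        }
    }

    public static void main(String[] args) {
        List<String> nodes = new ArrayList<>(List.of("dn1:40269", "dn2:38827", "dn3:39493"));
        BlockAllocator allocator = excludedNodes -> {
            List<String> candidates = new ArrayList<>(nodes);
            candidates.removeAll(excludedNodes);
            return candidates;
        };
        NodeProbe probe = dn -> dn.startsWith("dn3");    // only dn3 is still up
        System.out.println(setupPipeline(allocator, probe)); // [dn3:39493]
    }
}
```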
2024-11-22T04:35:36,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741846_1029 to 127.0.0.1:40269 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:36,229 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/63fd947ea4c449028775699f94927477 2024-11-22T04:35:36,236 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/63fd947ea4c449028775699f94927477 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477 2024-11-22T04:35:36,244 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477, entries=1, sequenceid=34, filesize=5.9 K 2024-11-22T04:35:36,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a35ad475da12a258dad9be3d792a837c in 436ms, sequenceid=34, compaction requested=true 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7 because midkey is the same as first or last row 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a35ad475da12a258dad9be3d792a837c:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:35:36,246 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:35:36,246 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:35:36,247 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:35:36,248 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1541): a35ad475da12a258dad9be3d792a837c/info is initiating minor compaction (all files) 2024-11-22T04:35:36,248 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a35ad475da12a258dad9be3d792a837c/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:36,248 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477] into tmpdir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp, totalSize=28.2 K 2024-11-22T04:35:36,248 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd54c7c18842474fb8750331be1c9494, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732250129672 2024-11-22T04:35:36,249 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting e94a0027676d47f59e097ced938bada7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732250133754 2024-11-22T04:35:36,249 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63fd947ea4c449028775699f94927477, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732250135808 2024-11-22T04:35:36,264 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a35ad475da12a258dad9be3d792a837c#info#compaction#21 average throughput is 6.16 
MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:35:36,265 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/1bb0b8a04d4844e3a6c2e635bd626a4f is 1080, key is row0002/info:/1732250129672/Put/seqid=0 2024-11-22T04:35:36,268 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:36,268 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41478 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045 to mirror 127.0.0.1:43317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
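The compaction selection logged a little earlier ("Selecting compaction from 3 store files ... selected 3 files of size 28880 ... with 1 in ratio") reflects a size-ratio test: a store file is only grouped with its neighbours when it is not disproportionately larger than them. The helper below is a rough, hypothetical simplification of that idea; the real ExploringCompactionPolicy scores contiguous runs of store files and picks the best permutation.

```java
// Rough illustration of the size-ratio test behind ratio-based compaction
// selection. This is a simplified sketch, not the HBase policy code.
import java.util.List;

public class RatioSelectionSketch {

    /**
     * A file "fits" when its size is no larger than ratio * (sum of the other
     * candidates), so one oversized file does not drag small files into an
     * expensive rewrite.
     */
    static boolean allWithinRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > ratio * (total - size)) {
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Sizes of the three store files from the log: 10.1 K, 12.2 K and 5.9 K.
        List<Long> sizes = List.of(10347L, 12506L, 6027L);
        System.out.println("total = " + sizes.stream().mapToLong(Long::longValue).sum()); // 28880
        System.out.println("selectable = " + allWithinRatio(sizes, 1.2));                 // true
    }
}
```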
2024-11-22T04:35:36,268 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:36,268 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045 2024-11-22T04:35:36,268 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41478 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045] {}] datanode.BlockReceiver(316): Block 1073741862 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:36,268 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41478 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741862_1045] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41478 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:36,269 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:36,270 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:36,270 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:36,270 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741863_1046 2024-11-22T04:35:36,271 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:36,273 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:36,273 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:36,273 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741864_1047 2024-11-22T04:35:36,273 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:36,275 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:36,275 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:36,275 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741865_1048 2024-11-22T04:35:36,275 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:36,276 WARN [IPC Server handler 1 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:36,276 WARN [IPC Server handler 1 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:36,276 WARN [IPC Server handler 1 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:36,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741866_1049 (size=17994) 2024-11-22T04:35:36,688 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/1bb0b8a04d4844e3a6c2e635bd626a4f as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f 2024-11-22T04:35:36,697 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a35ad475da12a258dad9be3d792a837c/info of a35ad475da12a258dad9be3d792a837c into 1bb0b8a04d4844e3a6c2e635bd626a4f(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
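The "Failed to place enough replicas, still in need of 1 to reach 2" warnings above mean the NameNode can only find one live DISK storage for a block that wants replication 2, because the other datanodes in the mini-cluster have been stopped; the compaction output is nonetheless committed on the lone surviving replica. When diagnosing this outside the test, a quick check is how many datanodes the NameNode still considers live. A small sketch using the HDFS client API (NameNode URI copied from this run, everything else illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import java.net.URI;

public class LiveDatanodeReport {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39459"), conf)) {
            DistributedFileSystem dfs = (DistributedFileSystem) fs;
            // Ask the NameNode which datanodes it currently considers live.
            DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
            System.out.println("Live datanodes: " + live.length);
            for (DatanodeInfo dn : live) {
                System.out.println("  " + dn.getXferAddr());
            }
        }
    }
}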
2024-11-22T04:35:36,697 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:36,697 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., storeName=a35ad475da12a258dad9be3d792a837c/info, priority=13, startTime=1732250136246; duration=0sec 2024-11-22T04:35:36,697 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T04:35:36,697 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f because midkey is the same as first or last row 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f because midkey is the same as first or last row 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f because midkey is the same as first or last row 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:35:36,698 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a35ad475da12a258dad9be3d792a837c:info 2024-11-22T04:35:37,226 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:37,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T04:35:37,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/0af9727f53ce45708969c5f10acd6b26 is 1079, key is tmprow/info:/1732250137229/Put/seqid=0 2024-11-22T04:35:37,238 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41504 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050 to mirror 127.0.0.1:43317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:37,238 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:37,238 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050 2024-11-22T04:35:37,238 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41504 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:37,238 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41504 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41504 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:37,239 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:37,240 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,240 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:37,241 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741868_1051 2024-11-22T04:35:37,241 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:37,244 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41520 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:37,244 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:37,244 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052 2024-11-22T04:35:37,244 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41520 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:37,245 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41520 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41520 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:37,245 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:37,247 WARN [Thread-952 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,247 WARN [Thread-952 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:37,247 WARN [Thread-952 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741870_1053 2024-11-22T04:35:37,248 WARN [Thread-952 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:37,248 WARN [IPC Server handler 2 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:37,248 WARN [IPC Server handler 2 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:37,248 WARN [IPC Server handler 2 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:37,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741871_1054 (size=6027) 2024-11-22T04:35:37,480 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,653 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/0af9727f53ce45708969c5f10acd6b26 2024-11-22T04:35:37,663 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/0af9727f53ce45708969c5f10acd6b26 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26 2024-11-22T04:35:37,670 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26, entries=1, sequenceid=45, filesize=5.9 K 2024-11-22T04:35:37,672 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a35ad475da12a258dad9be3d792a837c in 442ms, sequenceid=45, compaction requested=false 2024-11-22T04:35:37,672 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:37,672 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-22T04:35:37,672 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:37,672 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f because midkey is the same as first or last row 2024-11-22T04:35:37,678 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]] 2024-11-22T04:35:37,678 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,679 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358:(num 1732250135657) roll requested 2024-11-22T04:35:37,679 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 2024-11-22T04:35:37,682 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40269 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,682 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41542 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055 to mirror 127.0.0.1:40269 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
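The log roller reacts to the degraded pipeline ("Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL.") by requesting a new WAL file, which is what the "roll requested" and "New stream slow monitor" lines record. For reference, the same roll can also be requested explicitly from a client; a hedged sketch using the public Admin API (the server name host,port,startcode is copied from this run, the connection setup is assumed):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Region server identity as it appears in the log:
            // 8fc3ff0a63e6,37839,1732250113358 (host, port, start code).
            ServerName rs = ServerName.valueOf("8fc3ff0a63e6", 37839, 1732250113358L);
            // Ask that region server to close its current WAL and open a new one.
            admin.rollWALWriter(rs);
        }
    }
}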
2024-11-22T04:35:37,683 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:37,683 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41542 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:37,683 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055 2024-11-22T04:35:37,683 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41542 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41542 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:37,683 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:37,684 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:37,684 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:37,685 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741873_1056 2024-11-22T04:35:37,685 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:37,686 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,686 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:37,686 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741874_1057 2024-11-22T04:35:37,687 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:37,688 WARN [Thread-958 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:37,688 WARN [Thread-958 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:37,688 WARN [Thread-958 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741875_1058 2024-11-22T04:35:37,689 WARN [Thread-958 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:37,690 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:37,690 WARN [IPC Server handler 3 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:37,690 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:37,693 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:37,693 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:37,693 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:37,693 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:37,693 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:37,693 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 2024-11-22T04:35:37,694 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-22T04:35:37,694 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:37,694 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 is not closed yet, will try archiving it next time 2024-11-22T04:35:37,695 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C37839%2C1732250113358.1732250131627 2024-11-22T04:35:37,695 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741856_1039 (size=13591) 2024-11-22T04:35:38,096 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 is not closed yet, will try archiving it next time 2024-11-22T04:35:38,651 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:38,651 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T04:35:38,655 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/6c8edf465f8a49f3b349c51866dbcf0e is 1079, key is tmprow/info:/1732250138650/Put/seqid=0 2024-11-22T04:35:38,657 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
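Once a roll succeeds, older WAL files are moved out of the per-server WALs directory into oldWALs, as the "Archiving ... to ... oldWALs" line above shows. A purely illustrative listing of that archive directory (paths and NameNode URI copied from this run; any real cluster will differ):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import java.net.URI;

public class ListOldWals {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:39459"), conf)) {
            // Archive directory used by this test run.
            Path oldWals = new Path(
                "/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs");
            for (FileStatus st : fs.listStatus(oldWals)) {
                System.out.println(st.getLen() + "\t" + st.getPath().getName());
            }
        }
    }
}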
2024-11-22T04:35:38,657 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:38,657 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741877_1060 2024-11-22T04:35:38,658 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:38,660 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40269 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:38,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41560 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061 to mirror 127.0.0.1:40269 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:38,660 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:38,660 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061 2024-11-22T04:35:38,660 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41560 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061] {}] datanode.BlockReceiver(316): Block 1073741878 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:38,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41560 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741878_1061] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41560 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:38,661 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:38,662 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:38,662 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:38,662 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741879_1062 2024-11-22T04:35:38,663 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:38,665 WARN [Thread-963 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:38,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41568 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063 to mirror 127.0.0.1:43317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:38,665 WARN [Thread-963 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:38,665 WARN [Thread-963 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063 2024-11-22T04:35:38,665 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41568 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:38,665 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41568 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41568 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
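The BlockReceiver(316) messages above, "has not released the reserved bytes. Releasing 134217728 bytes as part of close", reflect the datanode returning the disk space it reserved up front for the in-flight block: 134217728 bytes is exactly 128 MiB, the usual dfs.blocksize default, so a full block's worth of reservation is freed when the aborted write is closed. A trivial check of that arithmetic:

public class BlockReservationMath {
    public static void main(String[] args) {
        // 128 MiB, the common dfs.blocksize default.
        long defaultBlockSize = 128L * 1024 * 1024;
        // Matches the byte count the BlockReceiver reports releasing on close.
        System.out.println(defaultBlockSize == 134_217_728L); // true
    }
}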
2024-11-22T04:35:38,666 WARN [Thread-963 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:38,666 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:38,667 WARN [IPC Server handler 3 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:38,667 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:38,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741881_1064 (size=6027) 2024-11-22T04:35:39,071 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/6c8edf465f8a49f3b349c51866dbcf0e 2024-11-22T04:35:39,082 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/6c8edf465f8a49f3b349c51866dbcf0e as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e 2024-11-22T04:35:39,090 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e, entries=1, sequenceid=55, filesize=5.9 K 2024-11-22T04:35:39,092 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a35ad475da12a258dad9be3d792a837c in 440ms, sequenceid=55, compaction requested=true 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f because midkey is the same as first or last row 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a35ad475da12a258dad9be3d792a837c:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:35:39,092 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:35:39,092 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:35:39,094 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:35:39,094 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1541): a35ad475da12a258dad9be3d792a837c/info is initiating minor compaction (all files) 2024-11-22T04:35:39,094 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a35ad475da12a258dad9be3d792a837c/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 
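After the flush completes, the region server re-evaluates its split policies: ConstantSizeRegionSplitPolicy reports the region is big enough (sumSize=29.3 K against sizeToCheck=16.0 K), but StoreUtils refuses to split because the file's midkey equals its first or last row, so there is no usable split point. Below is a much simplified sketch of the size test, assuming the commonly described IncreasingToUpperBoundRegionSplitPolicy formula (initial size times the cube of the number of regions of the same table on this server, capped at the configured maximum file size); the method and parameter names are illustrative, not HBase's.

public class SplitSizeCheck {
    /**
     * Simplified "is this region big enough to split" test. initialSize is typically
     * twice the memstore flush size and maxFileSize is hbase.hregion.max.filesize;
     * both are assumptions for illustration.
     */
    static boolean shouldSplit(long storeTotalBytes, int regionsWithCommonTable,
                               long initialSize, long maxFileSize) {
        long cube = (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
        long sizeToCheck = Math.min(maxFileSize, initialSize * cube);
        return storeTotalBytes > sizeToCheck;
    }

    public static void main(String[] args) {
        // Roughly the situation in the log: ~29.3 K of store files, a 16.0 K threshold, one region.
        System.out.println(shouldSplit(30_000, 1, 16_384, 10L * 1024 * 1024 * 1024)); // true
    }
}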
2024-11-22T04:35:39,095 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e] into tmpdir=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp, totalSize=29.3 K 2024-11-22T04:35:39,095 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1bb0b8a04d4844e3a6c2e635bd626a4f, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732250129672 2024-11-22T04:35:39,096 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0af9727f53ce45708969c5f10acd6b26, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732250137229 2024-11-22T04:35:39,096 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c8edf465f8a49f3b349c51866dbcf0e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732250138650 2024-11-22T04:35:39,112 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a35ad475da12a258dad9be3d792a837c#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:35:39,112 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/b5d8764dd4394f109545608223bd90fc is 1080, key is row0002/info:/1732250129672/Put/seqid=0 2024-11-22T04:35:39,114 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
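The compaction entries above show ExploringCompactionPolicy choosing all three store files (about 17.6 K + 5.9 K + 5.9 K) for a minor compaction. The heart of that selection is a ratio test: a candidate set is acceptable only if no single file is larger than the configured ratio times the combined size of the other files in the set. The sketch below is a simplified illustration of that check, not the actual HBase implementation; the 1.2 value corresponds to the usual hbase.hstore.compaction.ratio default.

import java.util.List;

public class CompactionRatioCheck {
    /** True if no file in the candidate set dwarfs the combined size of the others. */
    static boolean filesInRatio(List<Long> fileSizes, double ratio) {
        long total = fileSizes.stream().mapToLong(Long::longValue).sum();
        for (long size : fileSizes) {
            if (size > (total - size) * ratio) {
                return false; // this file is too large relative to the rest of the set
            }
        }
        return true;
    }

    public static void main(String[] args) {
        System.out.println(filesInRatio(List.of(12_000L, 6_000L, 6_000L), 1.2)); // true
        System.out.println(filesInRatio(List.of(50_000L, 6_000L, 6_000L), 1.2)); // false
    }
}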
2024-11-22T04:35:39,115 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:39,115 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741882_1065 2024-11-22T04:35:39,115 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:39,117 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35675 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:39,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41584 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066 to mirror 127.0.0.1:35675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:39,117 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:39,117 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066 2024-11-22T04:35:39,117 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41584 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066] {}] datanode.BlockReceiver(316): Block 1073741883 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:39,117 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41584 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741883_1066] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41584 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:39,118 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:39,120 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40269 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:39,120 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41590 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067 to mirror 127.0.0.1:40269 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:39,120 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:39,120 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067 2024-11-22T04:35:39,120 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41590 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067] {}] datanode.BlockReceiver(316): Block 1073741884 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:39,121 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41590 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741884_1067] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41590 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:39,121 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:39,124 WARN [Thread-969 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:43317 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:39,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41598 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068 to mirror 127.0.0.1:43317 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
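The "ack with firstBadLink as 127.0.0.1:43317" failures above come from pipeline setup: the first datanode accepted the write but could not connect to its downstream mirror, so it reports that mirror's address back to the client, which then excludes exactly that node and asks the namenode for a fresh pipeline. The number of such setup attempts is bounded on the client side. A small sketch of the relevant knob follows, assumed here to be the standard dfs.client.block.write.retries key; the value shown is only an example.

import org.apache.hadoop.conf.Configuration;

public class BlockWriteRetries {
    public static Configuration withMoreSetupAttempts() {
        Configuration conf = new Configuration();
        // How many times the client retries setting up a block write pipeline
        // (createBlockOutputStream) before giving up; 3 by default.
        conf.setInt("dfs.client.block.write.retries", 5);
        return conf;
    }
}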
2024-11-22T04:35:39,124 WARN [Thread-969 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]) is bad. 2024-11-22T04:35:39,124 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41598 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:39,124 WARN [Thread-969 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068 2024-11-22T04:35:39,124 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41598 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41598 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:39,125 WARN [Thread-969 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:43317,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK] 2024-11-22T04:35:39,125 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-22T04:35:39,125 WARN [IPC Server handler 3 on default port 39459 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-22T04:35:39,126 WARN [IPC Server handler 3 on default port 39459 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-22T04:35:39,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741886_1069 (size=18097) 2024-11-22T04:35:39,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741841_1024 to 127.0.0.1:40269 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
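The BlockPlacementPolicyDefault warnings above repeat because, with most of the mini-cluster's datanodes down, the namenode cannot find a second DISK replica; the message itself names the two loggers to raise to DEBUG for the per-node rejection reasons. A short sketch of doing that programmatically with Log4j 2, the logging stack this test already uses, via the standard log4j-core Configurator call:

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.core.config.Configurator;

public class EnablePlacementDebug {
    public static void main(String[] args) {
        // Raise the two loggers named in the warning to DEBUG so the namenode
        // explains why each candidate datanode/storage was rejected.
        Configurator.setLevel(
            "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy", Level.DEBUG);
        Configurator.setLevel("org.apache.hadoop.net.NetworkTopology", Level.DEBUG);
    }
}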
2024-11-22T04:35:39,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741861_1044 to 127.0.0.1:35675 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:39,226 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:39,480 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:39,540 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/b5d8764dd4394f109545608223bd90fc as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/b5d8764dd4394f109545608223bd90fc 2024-11-22T04:35:39,550 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a35ad475da12a258dad9be3d792a837c/info of a35ad475da12a258dad9be3d792a837c into b5d8764dd4394f109545608223bd90fc(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:35:39,550 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:39,550 INFO [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., storeName=a35ad475da12a258dad9be3d792a837c/info, priority=13, startTime=1732250139092; duration=0sec 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/b5d8764dd4394f109545608223bd90fc because midkey is the same as first or last row 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/b5d8764dd4394f109545608223bd90fc because midkey is the same as first or last row 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/b5d8764dd4394f109545608223bd90fc because midkey is the same as first or last row 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:35:39,551 DEBUG [RS:0;8fc3ff0a63e6:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a35ad475da12a258dad9be3d792a837c:info 2024-11-22T04:35:39,695 WARN [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-22T04:35:39,695 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:39,883 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:35:39,888 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:35:39,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:35:39,889 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:35:39,889 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:35:39,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@665e2468{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:35:39,890 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bee76e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:35:39,996 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62dc63d8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/java.io.tmpdir/jetty-localhost-37851-hadoop-hdfs-3_4_1-tests_jar-_-any-217354464834134036/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:35:39,996 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@424d92de{HTTP/1.1, (http/1.1)}{localhost:37851} 2024-11-22T04:35:39,997 INFO [Time-limited test {}] server.Server(415): Started @137263ms 2024-11-22T04:35:39,998 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:35:40,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741871_1054 to 127.0.0.1:43317 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:40,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741866_1049 to 127.0.0.1:35675 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:40,483 WARN [Thread-990 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:35:40,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa01cb52f30068ade with lease ID 0xa7190061b2006bad: from storage DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc node DatanodeRegistration(127.0.0.1:36561, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=39531, infoSecurePort=0, ipcPort=43215, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:35:40,491 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa01cb52f30068ade with lease ID 0xa7190061b2006bad: from storage DS-f22eae57-3539-45da-968f-18b89499b62a node DatanodeRegistration(127.0.0.1:36561, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=39531, infoSecurePort=0, ipcPort=43215, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:35:41,227 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
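The repeating FSHLog entries ("All datanodes ... are bad. Aborting...") together with the earlier "Too many consecutive RollWriter requests" warning describe the WAL side of the same outage: each roll tries to open a new WAL file, the write pipeline cannot be built with so few live datanodes, and the roller backs off after a bounded number of consecutive low-replication rolls. Below is a sketch of the configuration knobs involved, with key names as they are commonly documented for FSHLog (worth verifying against the HBase version in use); the values are only examples.

import org.apache.hadoop.conf.Configuration;

public class WalRollTuning {
    public static Configuration walRollSettings() {
        Configuration conf = new Configuration();
        // Replica count below which the current WAL is treated as under-replicated
        // and a roll is requested (normally derived from the DFS replication factor).
        conf.setInt("hbase.regionserver.hlog.tolerable.lowreplication", 2);
        // How many consecutive low-replication rolls are attempted before the roller
        // stops forcing new WAL files and emits the warning seen in this log.
        conf.setInt("hbase.regionserver.hlog.lowreplication.rolllimit", 5);
        // Periodic roll interval in milliseconds, independent of size or replication triggers.
        conf.setLong("hbase.regionserver.logroll.period", 3600_000L);
        return conf;
    }
}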
2024-11-22T04:35:41,481 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:41,695 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:42,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6c95d0e1[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741881_1064 to 127.0.0.1:38827 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:42,167 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741856_1039 to 127.0.0.1:35675 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:43,168 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T04:35:43,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741886_1069 (size=18097) 2024-11-22T04:35:43,227 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:43,481 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:43,696 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:44,163 ERROR [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData-prefix:8fc3ff0a63e6,46195,1732250113188 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:44,163 WARN [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData-prefix:8fc3ff0a63e6,46195,1732250113188 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:44,163 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C46195%2C1732250113188:(num 1732250113907) roll requested 2024-11-22T04:35:44,164 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C46195%2C1732250113188.1732250144164 2024-11-22T04:35:44,169 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:35675 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:44,169 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53046 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4]'}, localName='127.0.0.1:36561', datanodeUuid='af0b0e36-61ae-483f-a85d-ba85a4dacb3f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070 to mirror 127.0.0.1:35675 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:44,169 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:44,169 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070 2024-11-22T04:35:44,169 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53046 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:44,170 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53046 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:36561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53046 dst: /127.0.0.1:36561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:44,170 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:44,173 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:44,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53058 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4]'}, localName='127.0.0.1:36561', datanodeUuid='af0b0e36-61ae-483f-a85d-ba85a4dacb3f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:44,173 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:44,173 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071 2024-11-22T04:35:44,173 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53058 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:44,173 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_29404711_22 at /127.0.0.1:53058 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:36561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53058 dst: /127.0.0.1:36561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:44,174 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:44,175 WARN [Thread-1011 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:44,175 WARN [Thread-1011 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:44,175 WARN [Thread-1011 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741889_1072 2024-11-22T04:35:44,176 WARN [Thread-1011 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:44,180 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:44,180 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:44,180 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:44,180 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:44,180 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:44,181 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250144164 2024-11-22T04:35:44,181 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:44,181 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
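The records above are standard HDFS write-pipeline recovery: each createBlockOutputStream failure makes the client abandon the block, exclude the unreachable datanode, and retry, until no replacement remains and the stream aborts with "All datanodes ... are bad". As an illustrative sketch only (not part of this test run), the client-side behaviour is steered by the replace-datanode-on-failure settings; the output path and payload below are made up:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class PipelineRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Ask the NameNode for a replacement datanode when a pipeline node fails mid-write.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            // Fall back to the surviving datanodes instead of failing when no replacement exists.
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);

            try (FileSystem fs = FileSystem.get(conf);
                 FSDataOutputStream out = fs.create(new Path("/tmp/pipeline-sketch"))) {
                out.writeBytes("payload");   // written through the DataStreamer pipeline
                out.hsync();                 // force an ack from every datanode in the pipeline
            }
        }
    }
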
2024-11-22T04:35:44,181 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 2024-11-22T04:35:44,182 WARN [IPC Server handler 2 on default port 39459 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-22T04:35:44,182 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 after 1ms 2024-11-22T04:35:44,184 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39531:39531),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-22T04:35:44,185 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 is not closed yet, will try archiving it next time 2024-11-22T04:35:45,228 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:45,697 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:47,228 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:47,697 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:48,184 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 after 4002ms 2024-11-22T04:35:49,228 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:49,697 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
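RecoverLeaseFSUtils above is polling HDFS lease recovery on the WAL that was never cleanly closed: attempt=0 fails while the NameNode still reports "Lease recovery is in progress", and it retries a few seconds later. A minimal sketch of the same idea against the public DistributedFileSystem API (the path and retry timing are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class LeaseRecoverySketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            Path wal = new Path(args[0]);   // e.g. an old WAL left open by a dead pipeline
            DistributedFileSystem dfs =
                (DistributedFileSystem) FileSystem.get(wal.toUri(), conf);

            // recoverLease() returns true once the file is closed; until the NameNode
            // finishes recovering the last block it keeps returning false.
            boolean closed = dfs.recoverLease(wal);
            for (int attempt = 1; !closed && attempt < 10; attempt++) {
                Thread.sleep(4000L);        // back off between attempts
                closed = dfs.recoverLease(wal);
            }
            System.out.println("lease recovered: " + closed);
        }
    }
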
2024-11-22T04:35:50,507 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@e09112a {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40269,null,null]) java.net.ConnectException: Call From 8fc3ff0a63e6/172.17.0.2 to localhost:33685 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T04:35:50,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741833_1019 (size=455) 2024-11-22T04:35:50,651 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C37839%2C1732250113358.1732250114416 2024-11-22T04:35:50,652 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C37839%2C1732250113358.1732250135657 2024-11-22T04:35:51,229 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:51,490 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6e26ee65[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:36561, datanodeUuid=af0b0e36-61ae-483f-a85d-ba85a4dacb3f, infoPort=39531, infoSecurePort=0, ipcPort=43215, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741833_1019 to 127.0.0.1:38827 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:35:51,698 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,229 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,545 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.1732250153544 2024-11-22T04:35:53,550 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,550 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 
2024-11-22T04:35:53,550 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741891_1075 2024-11-22T04:35:53,551 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:53,562 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,562 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,562 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,562 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,563 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,563 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250153544 2024-11-22T04:35:53,564 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39531:39531),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-22T04:35:53,564 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 is not closed yet, will try archiving it next time 2024-11-22T04:35:53,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741876_1059 (size=12911) 2024-11-22T04:35:53,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:53,568 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-22T04:35:53,573 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e3fef17d000c48fd86f58cb87c65e467 is 1080, key is row0013/info:/1732250153565/Put/seqid=0 2024-11-22T04:35:53,576 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,576 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:53,576 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741893_1077 2024-11-22T04:35:53,577 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:53,578 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,579 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741894_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 
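The MemStoreFlusher run that starts above writes the memstore out as a temporary HFile and, in the records just below, commits it under info/ and re-checks the split policy. As an illustrative client-side sketch of the kind of load behind such a flush (the table name is taken from the log; the row keys, qualifier, and the explicit Admin.flush call are assumptions for the example):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class FlushLoadSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Table table = conn.getTable(tn);
                 Admin admin = conn.getAdmin()) {
                // A few ~1 KB cells in the 'info' family, in the spirit of the rowNNNN keys above.
                byte[] value = new byte[1024];
                for (int i = 13; i <= 15; i++) {
                    Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
                    put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), value);
                    table.put(put);
                }
                // Force the memstore to disk instead of waiting for the flush threshold.
                admin.flush(tn);
            }
        }
    }
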
2024-11-22T04:35:53,579 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741894_1078 2024-11-22T04:35:53,579 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741895_1079 (size=8190) 2024-11-22T04:35:53,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741895_1079 (size=8190) 2024-11-22T04:35:53,586 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e3fef17d000c48fd86f58cb87c65e467 2024-11-22T04:35:53,593 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/e3fef17d000c48fd86f58cb87c65e467 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e3fef17d000c48fd86f58cb87c65e467 2024-11-22T04:35:53,600 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e3fef17d000c48fd86f58cb87c65e467, entries=3, sequenceid=66, filesize=8.0 K 2024-11-22T04:35:53,602 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for a35ad475da12a258dad9be3d792a837c in 33ms, sequenceid=66, compaction requested=false 2024-11-22T04:35:53,602 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a35ad475da12a258dad9be3d792a837c: 2024-11-22T04:35:53,602 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-22T04:35:53,602 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:35:53,602 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/b5d8764dd4394f109545608223bd90fc because midkey is the same as first or last row 2024-11-22T04:35:53,698 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-22T04:35:53,698 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:35:53,793 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T04:35:53,793 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:53,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:53,793 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:53,793 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T04:35:53,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:35:53,793 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2025209955, stopped=false 2024-11-22T04:35:53,793 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,46195,1732250113188 2024-11-22T04:35:53,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:53,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:53,845 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:35:53,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:53,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:53,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:53,846 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:53,846 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
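The shutdown sequence above is the test's tearDown path: close the async connection, then let JVMClusterUtil stop the master and region servers. A minimal JUnit 4 sketch of that lifecycle, assuming the HBaseTestingUtil API named in the call stack (cluster sizing left at defaults; the test body is a placeholder):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
        private final HBaseTestingUtil util = new HBaseTestingUtil();

        @Before
        public void setUp() throws Exception {
            util.startMiniCluster();     // DFS + ZooKeeper + HBase miniclusters
        }

        @Test
        public void placeholder() {
            // a real test would drive WAL rolling here
        }

        @After
        public void tearDown() throws Exception {
            // Mirrors AbstractTestLogRolling.tearDown(): connections are closed and the
            // master is asked to shut the whole minicluster down, as in the records above.
            util.shutdownMiniCluster();
        }
    }
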
2024-11-22T04:35:53,846 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:53,846 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:53,847 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,37839,1732250113358' ***** 2024-11-22T04:35:53,847 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:35:53,847 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,38269,1732250115127' ***** 2024-11-22T04:35:53,847 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:35:53,847 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:35:53,847 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:35:53,848 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:35:53,848 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:53,848 INFO [RS:0;8fc3ff0a63e6:37839 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:35:53,848 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:53,848 INFO [RS:0;8fc3ff0a63e6:37839 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:35:53,848 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(3091): Received CLOSE for a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:53,848 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:35:53,849 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:35:53,849 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:53,849 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:53,849 INFO [RS:1;8fc3ff0a63e6:38269 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:35:53,849 INFO [RS:0;8fc3ff0a63e6:37839 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:37839. 2024-11-22T04:35:53,849 INFO [RS:1;8fc3ff0a63e6:38269 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
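The ZKWatcher events above show the cluster-up marker /hbase/running being deleted and each process re-arming a watch on the now-missing znode. A bare ZooKeeper sketch of that watch pattern (the quorum address is copied from the log; the timeouts and printouts are illustrative):

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            ZooKeeper zk = new ZooKeeper("127.0.0.1:49807", 30_000,
                    (WatchedEvent event) ->
                            System.out.println(event.getType() + " on " + event.getPath()));
            // exists() with watch=true arms a watch even when the znode is absent, which is
            // what the "Set watcher on znode that does not yet exist" records correspond to.
            if (zk.exists("/hbase/running", true) == null) {
                System.out.println("/hbase/running is gone; waiting for NodeCreated");
            }
            Thread.sleep(5_000);   // give the watcher a moment to fire in this toy example
            zk.close();
        }
    }
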
2024-11-22T04:35:53,849 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:53,849 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:53,850 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:53,850 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:53,850 INFO [RS:1;8fc3ff0a63e6:38269 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;8fc3ff0a63e6:38269. 2024-11-22T04:35:53,850 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:35:53,850 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:35:53,850 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T04:35:53,850 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:35:53,850 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:53,850 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:35:53,850 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,38269,1732250115127; all regions closed. 2024-11-22T04:35:53,850 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a35ad475da12a258dad9be3d792a837c, disabling compactions & flushes 2024-11-22T04:35:53,851 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:53,851 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:53,851 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. after waiting 0 ms 2024-11-22T04:35:53,851 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 
2024-11-22T04:35:53,851 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing a35ad475da12a258dad9be3d792a837c 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-22T04:35:53,852 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T04:35:53,852 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1325): Online Regions={a35ad475da12a258dad9be3d792a837c=TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T04:35:53,852 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a35ad475da12a258dad9be3d792a837c 2024-11-22T04:35:53,852 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:35:53,852 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:35:53,852 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:35:53,852 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:35:53,852 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:35:53,852 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,852 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-22T04:35:53,852 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,852 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,853 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,853 ERROR [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0-prefix:8fc3ff0a63e6,37839,1732250113358.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:35:53,853 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,853 WARN [FSHLog-0-hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0-prefix:8fc3ff0a63e6,37839,1732250113358.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,853 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358.meta:.meta(num 1732250114864) roll requested 2024-11-22T04:35:53,853 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250153853.meta 2024-11-22T04:35:53,853 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,853 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
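The appendAndSync failure and the forced roll of the .meta WAL above are the expected symptoms of this test's fault injection: a datanode backing the WAL write pipeline has been killed. A minimal, hypothetical sketch of provoking the same condition with a MiniDFSCluster follows; it is not the actual test code, and the replica count is an assumption:

    // Hypothetical sketch: stop a datanode under a MiniDFSCluster so that clients
    // holding open write pipelines see "Error Recovery ... datanode ... is bad" and,
    // if no replacement can be found, "All datanodes ... are bad. Aborting...".
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class DatanodeDeathSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(3)   // assumed; the log shows several local datanodes
            .build();
        cluster.waitActive();
        // Killing the first datanode invalidates any pipeline that includes it.
        cluster.stopDataNode(0);
        // ... write through an already-open stream here to trigger pipeline recovery ...
        cluster.shutdown();
      }
    }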
2024-11-22T04:35:53,854 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 2024-11-22T04:35:53,854 WARN [IPC Server handler 2 on default port 39459 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 has not been closed. Lease recovery is in progress. RecoveryId = 1080 for block blk_1073741837_1013 2024-11-22T04:35:53,854 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 after 0ms 2024-11-22T04:35:53,857 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1081 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,857 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741896_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:53,857 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741896_1081 2024-11-22T04:35:53,858 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/48392c756b604112b67b81b4b4acf5da is 1080, key is row0015/info:/1732250153569/Put/seqid=0 2024-11-22T04:35:53,859 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:53,861 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,861 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741898_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:53,861 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741898_1083 2024-11-22T04:35:53,861 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,861 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41202 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4]'}, localName='127.0.0.1:36561', datanodeUuid='af0b0e36-61ae-483f-a85d-ba85a4dacb3f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:53,861 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:53,861 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41202 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:53,861 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:53,861 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41202 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082] {}] datanode.DataXceiver(331): 127.0.0.1:36561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41202 dst: /127.0.0.1:36561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
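Earlier in this stretch the Close-WAL-Writer thread asks RecoverLeaseFSUtils to recover the lease on the abandoned WAL file, and the NameNode replies that recovery is still in progress (attempt=0 after 0ms), so the utility will retry. A rough sketch of such a retry loop built on the public DistributedFileSystem.recoverLease call is shown below; the retry limit and pause are assumptions, not HBase's actual values:

    // Rough sketch of lease recovery with retries; recoverLease() returns true once
    // the file has been closed and its lease released, so the caller polls until then.
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public final class LeaseRecoverySketch {
      public static boolean recoverLease(DistributedFileSystem dfs, Path walFile)
          throws Exception {
        for (int attempt = 0; attempt < 10; attempt++) {        // assumed limit
          if (dfs.recoverLease(walFile)) {
            return true;                                        // lease released
          }
          Thread.sleep(4000L);  // assumed pause; recovery completes asynchronously
        }
        return false;
      }
    }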
2024-11-22T04:35:53,862 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741897_1082 2024-11-22T04:35:53,863 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,864 WARN [Thread-1039 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1084 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,864 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:51000 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6]'}, localName='127.0.0.1:39493', datanodeUuid='154db80d-b446-4abe-899a-1ee90a38892f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:53,864 WARN [Thread-1039 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 
2024-11-22T04:35:53,864 WARN [Thread-1039 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084 2024-11-22T04:35:53,864 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:51000 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084] {}] datanode.BlockReceiver(316): Block 1073741899 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-22T04:35:53,865 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:51000 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741899_1084] {}] datanode.DataXceiver(331): 127.0.0.1:39493:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51000 dst: /127.0.0.1:39493 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:53,865 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,865 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 
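The repeated createBlockOutputStream failures above show the DFSClient abandoning blocks and excluding the dead datanodes (127.0.0.1:40269 and 127.0.0.1:38827) while it rebuilds write pipelines. This behaviour is governed by the standard HDFS client-side replace-datanode-on-failure settings; the values in the sketch below are illustrative and not read from this test's configuration:

    // Illustrative values only: the knobs that control whether a client asks for a
    // replacement datanode when a pipeline member dies, and whether it keeps writing
    // if no replacement is available.
    import org.apache.hadoop.conf.Configuration;

    public class PipelineRecoveryConfSketch {
      public static Configuration pipelineRecoveryConf() {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
        // Best-effort lets the stream continue with fewer replicas instead of failing
        // outright when no replacement datanode can be found.
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
        return conf;
      }
    }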
2024-11-22T04:35:53,865 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741900_1085 2024-11-22T04:35:53,865 WARN [Thread-1039 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,865 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:53,867 WARN [Thread-1040 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741902_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,867 WARN [Thread-1040 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741902_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:53,867 WARN [Thread-1040 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741902_1087 2024-11-22T04:35:53,867 WARN [Thread-1040 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:53,872 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,872 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,873 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,873 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,873 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:53,873 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250153853.meta 2024-11-22T04:35:53,876 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,877 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta 2024-11-22T04:35:53,877 WARN [IPC Server handler 2 on default port 39459 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1089 for block blk_1073741834_1010 2024-11-22T04:35:53,877 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta after 0ms 2024-11-22T04:35:53,878 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39531:39531),(127.0.0.1/127.0.0.1:44795:44795)] 2024-11-22T04:35:53,878 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta is not closed yet, will try archiving it next time 2024-11-22T04:35:53,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741903_1088 (size=14660) 2024-11-22T04:35:53,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741903_1088 (size=14660) 2024-11-22T04:35:53,880 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/48392c756b604112b67b81b4b4acf5da 2024-11-22T04:35:53,889 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/.tmp/info/48392c756b604112b67b81b4b4acf5da as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/48392c756b604112b67b81b4b4acf5da 2024-11-22T04:35:53,897 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/info/2b97be1aa9104942ae9731dc4990918c is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c./info:regioninfo/1732250115628/Put/seqid=0 2024-11-22T04:35:53,899 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/48392c756b604112b67b81b4b4acf5da, entries=9, sequenceid=78, filesize=14.3 K 2024-11-22T04:35:53,899 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741904_1090 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,899 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741904_1090 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK]) is bad. 2024-11-22T04:35:53,899 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741904_1090 2024-11-22T04:35:53,900 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40269,DS-801bf5dc-deb9-4e8e-9476-4fd039b9d875,DISK] 2024-11-22T04:35:53,900 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for a35ad475da12a258dad9be3d792a837c in 49ms, sequenceid=78, compaction requested=true 2024-11-22T04:35:53,901 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e] to archive 2024-11-22T04:35:53,901 WARN [Thread-1053 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,902 WARN [Thread-1053 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:53,902 WARN [Thread-1053 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741905_1091 2024-11-22T04:35:53,902 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T04:35:53,902 WARN [Thread-1053 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,907 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/cd54c7c18842474fb8750331be1c9494 2024-11-22T04:35:53,908 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/e94a0027676d47f59e097ced938bada7 2024-11-22T04:35:53,911 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/1bb0b8a04d4844e3a6c2e635bd626a4f 2024-11-22T04:35:53,913 DEBUG 
[StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/63fd947ea4c449028775699f94927477 2024-11-22T04:35:53,914 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/0af9727f53ce45708969c5f10acd6b26 2024-11-22T04:35:53,916 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/info/6c8edf465f8a49f3b349c51866dbcf0e 2024-11-22T04:35:53,917 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=8fc3ff0a63e6:46195 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 
16 more 2024-11-22T04:35:53,918 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cd54c7c18842474fb8750331be1c9494=10347, e94a0027676d47f59e097ced938bada7=12506, 1bb0b8a04d4844e3a6c2e635bd626a4f=17994, 63fd947ea4c449028775699f94927477=6027, 0af9727f53ce45708969c5f10acd6b26=6027, 6c8edf465f8a49f3b349c51866dbcf0e=6027] 2024-11-22T04:35:53,920 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741906_1092 (size=7089) 2024-11-22T04:35:53,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741906_1092 (size=7089) 2024-11-22T04:35:53,925 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/info/2b97be1aa9104942ae9731dc4990918c 2024-11-22T04:35:53,928 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a35ad475da12a258dad9be3d792a837c/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-22T04:35:53,928 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:53,929 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a35ad475da12a258dad9be3d792a837c: Waiting for close lock at 1732250153850Running coprocessor pre-close hooks at 1732250153850Disabling compacts and flushes for region at 1732250153850Disabling writes for close at 1732250153851 (+1 ms)Obtaining lock to block concurrent updates at 1732250153851Preparing flush snapshotting stores in a35ad475da12a258dad9be3d792a837c at 1732250153851Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732250153852 (+1 ms)Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 
at 1732250153852Flushing a35ad475da12a258dad9be3d792a837c/info: creating writer at 1732250153853 (+1 ms)Flushing a35ad475da12a258dad9be3d792a837c/info: appending metadata at 1732250153857 (+4 ms)Flushing a35ad475da12a258dad9be3d792a837c/info: closing flushed file at 1732250153857Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@23fea136: reopening flushed file at 1732250153888 (+31 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for a35ad475da12a258dad9be3d792a837c in 49ms, sequenceid=78, compaction requested=true at 1732250153900 (+12 ms)Writing region close event to WAL at 1732250153918 (+18 ms)Running coprocessor post-close hooks at 1732250153928 (+10 ms)Closed at 1732250153928 2024-11-22T04:35:53,929 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732250115257.a35ad475da12a258dad9be3d792a837c. 2024-11-22T04:35:53,949 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/ns/35642178d24c4c69b092189f466fe8d1 is 43, key is default/ns:d/1732250115049/Put/seqid=0 2024-11-22T04:35:53,951 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,951 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:39493,DS-6f380372-c9f9-4f4b-966e-64c38a3051b4,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:53,951 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741907_1093 2024-11-22T04:35:53,952 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:53,953 WARN [Thread-1060 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,953 WARN [Thread-1060 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK], DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:53,953 WARN [Thread-1060 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741908_1094 2024-11-22T04:35:53,954 WARN [Thread-1060 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741909_1095 (size=5153) 2024-11-22T04:35:53,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741909_1095 (size=5153) 2024-11-22T04:35:53,959 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/ns/35642178d24c4c69b092189f466fe8d1 2024-11-22T04:35:53,965 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C37839%2C1732250113358.1732250137679 2024-11-22T04:35:53,988 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/table/804c534256d6483589f1f5ea17c3ab92 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732250115638/Put/seqid=0 2024-11-22T04:35:53,990 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741910_1096 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,990 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741910_1096 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK], DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK]) is bad. 2024-11-22T04:35:53,990 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741910_1096 2024-11-22T04:35:53,990 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:35675,DS-9266d51e-4cdb-4fd3-b19b-9093cdce61f3,DISK] 2024-11-22T04:35:53,993 WARN [Thread-1066 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741911_1097 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38827 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:35:53,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41256 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4]'}, localName='127.0.0.1:36561', datanodeUuid='af0b0e36-61ae-483f-a85d-ba85a4dacb3f', xmitsInProgress=0}:Exception transferring block BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097 to mirror 127.0.0.1:38827 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:53,993 WARN [Thread-1066 {}] hdfs.DataStreamer(1731): Error Recovery for BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36561,DS-7b8c9dbd-9032-49da-b1d5-7fe72c0543cc,DISK], DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK]) is bad. 2024-11-22T04:35:53,993 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41256 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097] {}] datanode.BlockReceiver(316): Block 1073741911 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-22T04:35:53,993 WARN [Thread-1066 {}] hdfs.DataStreamer(1850): Abandoning BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097 2024-11-22T04:35:53,993 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1350685865_22 at /127.0.0.1:41256 [Receiving block BP-2085118315-172.17.0.2-1732250110796:blk_1073741911_1097] {}] datanode.DataXceiver(331): 127.0.0.1:36561:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41256 dst: /127.0.0.1:36561 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
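Around these entries each flushed HFile is first written under the region's .tmp directory and only then committed into the column family directory (the HRegionFileSystem "Committing ... as ..." lines above and below). A simplified sketch of that commit step is given here with illustrative paths; the real code additionally validates the file and updates the store's file list:

    // Simplified sketch of the flush commit: move a fully written HFile from the
    // region's .tmp directory into the family directory so readers only ever see
    // complete files. Paths are illustrative.
    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class FlushCommitSketch {
      public static Path commitFlushedFile(FileSystem fs, Path tmpFile, Path familyDir)
          throws IOException {
        Path dst = new Path(familyDir, tmpFile.getName());
        if (!fs.rename(tmpFile, dst)) {   // rename is atomic on HDFS
          throw new IOException("Failed to commit " + tmpFile + " to " + dst);
        }
        return dst;
      }
    }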
2024-11-22T04:35:53,994 WARN [Thread-1066 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38827,DS-9bece155-05a0-4e16-bd82-b4a49afc4fb2,DISK] 2024-11-22T04:35:53,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741912_1098 (size=5424) 2024-11-22T04:35:53,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741912_1098 (size=5424) 2024-11-22T04:35:54,000 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/table/804c534256d6483589f1f5ea17c3ab92 2024-11-22T04:35:54,008 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/info/2b97be1aa9104942ae9731dc4990918c as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/info/2b97be1aa9104942ae9731dc4990918c 2024-11-22T04:35:54,016 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/info/2b97be1aa9104942ae9731dc4990918c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-22T04:35:54,017 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/ns/35642178d24c4c69b092189f466fe8d1 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/ns/35642178d24c4c69b092189f466fe8d1 2024-11-22T04:35:54,024 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/ns/35642178d24c4c69b092189f466fe8d1, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T04:35:54,026 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/.tmp/table/804c534256d6483589f1f5ea17c3ab92 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/table/804c534256d6483589f1f5ea17c3ab92 2024-11-22T04:35:54,032 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/table/804c534256d6483589f1f5ea17c3ab92, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T04:35:54,033 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 181ms, sequenceid=11, 
compaction requested=false 2024-11-22T04:35:54,038 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T04:35:54,039 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:35:54,039 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:54,039 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250153852Running coprocessor pre-close hooks at 1732250153852Disabling compacts and flushes for region at 1732250153852Disabling writes for close at 1732250153852Obtaining lock to block concurrent updates at 1732250153852Preparing flush snapshotting stores in 1588230740 at 1732250153852Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732250153853 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732250153879 (+26 ms)Flushing 1588230740/info: creating writer at 1732250153879Flushing 1588230740/info: appending metadata at 1732250153897 (+18 ms)Flushing 1588230740/info: closing flushed file at 1732250153897Flushing 1588230740/ns: creating writer at 1732250153932 (+35 ms)Flushing 1588230740/ns: appending metadata at 1732250153949 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732250153949Flushing 1588230740/table: creating writer at 1732250153967 (+18 ms)Flushing 1588230740/table: appending metadata at 1732250153987 (+20 ms)Flushing 1588230740/table: closing flushed file at 1732250153987Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@668631bd: reopening flushed file at 1732250154007 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a80b025: reopening flushed file at 1732250154016 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1f1727f7: reopening flushed file at 1732250154025 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 181ms, sequenceid=11, compaction requested=false at 1732250154033 (+8 ms)Writing region close event to WAL at 1732250154034 (+1 ms)Running coprocessor post-close hooks at 1732250154039 (+5 ms)Closed at 1732250154039 2024-11-22T04:35:54,040 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:35:54,052 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,37839,1732250113358; all regions closed. 
2024-11-22T04:35:54,053 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:54,053 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:54,053 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:54,053 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:54,053 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741901_1086 (size=825) 2024-11-22T04:35:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741901_1086 (size=825) 2024-11-22T04:35:54,166 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@625232db[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39493, datanodeUuid=154db80d-b446-4abe-899a-1ee90a38892f, infoPort=44795, infoSecurePort=0, ipcPort=36607, storageInfo=lv=-57;cid=testClusterID;nsid=891927074;c=1732250110796):Failed to transfer BP-2085118315-172.17.0.2-1732250110796:blk_1073741876_1059 to 127.0.0.1:35675 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:35:54,225 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T04:35:54,225 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T04:35:54,250 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T04:35:54,250 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T04:35:54,253 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:55,065 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T04:35:55,065 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-22T04:35:55,226 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:57,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:35:57,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741836_1012 (size=76) 2024-11-22T04:35:57,855 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 after 4001ms 2024-11-22T04:35:57,878 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta after 4001ms 2024-11-22T04:35:58,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:35:58,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:35:58,853 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T04:35:58,856 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs 2024-11-22T04:35:58,856 INFO [RS:1;8fc3ff0a63e6:38269 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C38269%2C1732250115127:(num 1732250115358) 2024-11-22T04:35:58,856 DEBUG [RS:1;8fc3ff0a63e6:38269 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:58,856 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:58,856 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:58,856 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:58,857 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:35:58,857 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:35:58,857 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:35:58,857 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T04:35:58,857 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:58,857 INFO [RS:1;8fc3ff0a63e6:38269 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38269 2024-11-22T04:35:58,861 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:35:58,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:58,919 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,38269,1732250115127 2024-11-22T04:35:58,919 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:58,929 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,38269,1732250115127] 2024-11-22T04:35:58,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,939 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,38269,1732250115127 already deleted, retry=false 2024-11-22T04:35:58,940 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,38269,1732250115127 expired; onlineServers=1 2024-11-22T04:35:58,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,950 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:58,964 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,029 INFO [RS:1;8fc3ff0a63e6:38269 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:59,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:59,029 INFO [RS:1;8fc3ff0a63e6:38269 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,38269,1732250115127; zookeeper connection closed. 
2024-11-22T04:35:59,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:38269-0x10160d3103f0002, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:59,030 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@570c08f1 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@570c08f1 2024-11-22T04:35:59,053 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-22T04:35:59,057 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs 2024-11-22T04:35:59,057 INFO [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358.meta:.meta(num 1732250153853) 2024-11-22T04:35:59,057 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,057 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,057 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,058 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,058 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741892_1076 (size=14682) 2024-11-22T04:35:59,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741892_1076 (size=14682) 2024-11-22T04:35:59,062 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs 2024-11-22T04:35:59,062 INFO [RS:0;8fc3ff0a63e6:37839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37839%2C1732250113358:(num 1732250153544) 2024-11-22T04:35:59,062 DEBUG [RS:0;8fc3ff0a63e6:37839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:35:59,062 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:35:59,062 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:59,062 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:59,063 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:59,063 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:35:59,063 INFO [RS:0;8fc3ff0a63e6:37839 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37839 2024-11-22T04:35:59,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,37839,1732250113358 2024-11-22T04:35:59,072 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:35:59,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:35:59,082 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,37839,1732250113358] 2024-11-22T04:35:59,093 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,37839,1732250113358 already deleted, retry=false 2024-11-22T04:35:59,093 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,37839,1732250113358 expired; onlineServers=0 2024-11-22T04:35:59,093 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,46195,1732250113188' ***** 2024-11-22T04:35:59,093 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:35:59,093 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:35:59,093 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:35:59,094 DEBUG [M:0;8fc3ff0a63e6:46195 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:35:59,094 DEBUG [M:0;8fc3ff0a63e6:46195 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:35:59,094 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T04:35:59,094 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250114163 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250114163,5,FailOnTimeoutGroup] 2024-11-22T04:35:59,094 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250114163 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250114163,5,FailOnTimeoutGroup] 2024-11-22T04:35:59,094 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:35:59,094 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:35:59,094 DEBUG [M:0;8fc3ff0a63e6:46195 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:35:59,094 INFO [M:0;8fc3ff0a63e6:46195 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:35:59,095 INFO [M:0;8fc3ff0a63e6:46195 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:35:59,095 INFO [M:0;8fc3ff0a63e6:46195 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:35:59,095 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:35:59,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:35:59,103 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:35:59,103 DEBUG [M:0;8fc3ff0a63e6:46195 {}] zookeeper.ZKUtil(347): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T04:35:59,103 WARN [M:0;8fc3ff0a63e6:46195 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T04:35:59,104 INFO [M:0;8fc3ff0a63e6:46195 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/.lastflushedseqids 2024-11-22T04:35:59,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741913_1099 (size=130) 2024-11-22T04:35:59,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741913_1099 (size=130) 2024-11-22T04:35:59,112 INFO [M:0;8fc3ff0a63e6:46195 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:35:59,112 INFO [M:0;8fc3ff0a63e6:46195 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:35:59,112 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:35:59,112 INFO [M:0;8fc3ff0a63e6:46195 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:59,112 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:59,112 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:35:59,112 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:35:59,113 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-22T04:35:59,131 DEBUG [M:0;8fc3ff0a63e6:46195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaaa297c4cdd4b708eb637bccd20906b is 82, key is hbase:meta,,1/info:regioninfo/1732250114901/Put/seqid=0 2024-11-22T04:35:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741914_1100 (size=5672) 2024-11-22T04:35:59,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741914_1100 (size=5672) 2024-11-22T04:35:59,147 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaaa297c4cdd4b708eb637bccd20906b 2024-11-22T04:35:59,170 DEBUG [M:0;8fc3ff0a63e6:46195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44ba35fc156d44b49c49d3de2b4a6a73 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732250115643/Put/seqid=0 2024-11-22T04:35:59,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741915_1101 (size=6255) 2024-11-22T04:35:59,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741915_1101 (size=6255) 2024-11-22T04:35:59,176 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44ba35fc156d44b49c49d3de2b4a6a73 2024-11-22T04:35:59,182 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 44ba35fc156d44b49c49d3de2b4a6a73 2024-11-22T04:35:59,182 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:59,182 INFO [RS:0;8fc3ff0a63e6:37839 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:35:59,182 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:37839-0x10160d3103f0001, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:35:59,182 INFO [RS:0;8fc3ff0a63e6:37839 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,37839,1732250113358; zookeeper connection closed. 2024-11-22T04:35:59,183 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@591e06ea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@591e06ea 2024-11-22T04:35:59,183 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-22T04:35:59,197 DEBUG [M:0;8fc3ff0a63e6:46195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/918557272d304b9299809d7c7ab329f7 is 69, key is 8fc3ff0a63e6,37839,1732250113358/rs:state/1732250114222/Put/seqid=0 2024-11-22T04:35:59,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741916_1102 (size=5224) 2024-11-22T04:35:59,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741916_1102 (size=5224) 2024-11-22T04:35:59,202 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/918557272d304b9299809d7c7ab329f7 2024-11-22T04:35:59,223 DEBUG [M:0;8fc3ff0a63e6:46195 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c9af7eef1df4a0998c357dfea92bee8 is 52, key is load_balancer_on/state:d/1732250115102/Put/seqid=0 2024-11-22T04:35:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741917_1103 (size=5056) 2024-11-22T04:35:59,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741917_1103 (size=5056) 2024-11-22T04:35:59,228 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c9af7eef1df4a0998c357dfea92bee8 2024-11-22T04:35:59,234 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/aaaa297c4cdd4b708eb637bccd20906b as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaaa297c4cdd4b708eb637bccd20906b 2024-11-22T04:35:59,240 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/aaaa297c4cdd4b708eb637bccd20906b, entries=8, sequenceid=60, filesize=5.5 K 2024-11-22T04:35:59,241 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/44ba35fc156d44b49c49d3de2b4a6a73 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/44ba35fc156d44b49c49d3de2b4a6a73 2024-11-22T04:35:59,246 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 44ba35fc156d44b49c49d3de2b4a6a73 2024-11-22T04:35:59,247 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/44ba35fc156d44b49c49d3de2b4a6a73, entries=6, sequenceid=60, filesize=6.1 K 2024-11-22T04:35:59,248 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/918557272d304b9299809d7c7ab329f7 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/918557272d304b9299809d7c7ab329f7 2024-11-22T04:35:59,254 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/918557272d304b9299809d7c7ab329f7, entries=2, sequenceid=60, filesize=5.1 K 2024-11-22T04:35:59,255 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8c9af7eef1df4a0998c357dfea92bee8 as hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c9af7eef1df4a0998c357dfea92bee8 2024-11-22T04:35:59,260 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8c9af7eef1df4a0998c357dfea92bee8, entries=1, sequenceid=60, filesize=4.9 K 2024-11-22T04:35:59,261 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=60, compaction requested=false 2024-11-22T04:35:59,263 INFO [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:35:59,263 DEBUG [M:0;8fc3ff0a63e6:46195 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250159112Disabling compacts and flushes for region at 1732250159112Disabling writes for close at 1732250159112Obtaining lock to block concurrent updates at 1732250159113 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250159113Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732250159113Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250159114 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250159114Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250159131 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250159131Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250159153 (+22 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250159170 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250159170Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250159182 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250159196 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250159196Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250159207 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250159222 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250159222Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42f32d37: reopening flushed file at 1732250159233 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20e7303b: reopening flushed file at 1732250159240 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a79832e: reopening flushed file at 1732250159247 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b1a4be2: reopening flushed file at 1732250159254 (+7 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 149ms, sequenceid=60, compaction requested=false at 1732250159261 (+7 ms)Writing region close event to WAL at 1732250159263 (+2 ms)Closed at 1732250159263 2024-11-22T04:35:59,263 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,263 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,263 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,263 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,264 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:35:59,265 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741890_1073 (size=1045) 2024-11-22T04:35:59,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36561 is added to blk_1073741890_1073 (size=1045) 2024-11-22T04:35:59,467 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:35:59,489 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,489 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,490 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,496 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,497 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:35:59,862 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:35:59,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:35:59,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-22T04:35:59,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:35:59,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:35:59,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T04:36:00,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:36:00,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39493 is added to blk_1073741835_1011 (size=393) 2024-11-22T04:36:00,511 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@504e0cb1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40269,null,null]) java.net.ConnectException: Call From 8fc3ff0a63e6/172.17.0.2 to localhost:33685 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T04:36:00,863 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:00,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:01,194 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/WALs/8fc3ff0a63e6,46195,1732250113188/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/oldWALs/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 2024-11-22T04:36:01,199 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/MasterData/oldWALs/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907 to hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/oldWALs/8fc3ff0a63e6%2C46195%2C1732250113188.1732250113907$masterlocalwal$ 2024-11-22T04:36:01,200 INFO [M:0;8fc3ff0a63e6:46195 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-22T04:36:01,200 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:36:01,200 INFO [M:0;8fc3ff0a63e6:46195 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46195 2024-11-22T04:36:01,200 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:36:01,313 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:01,313 INFO [M:0;8fc3ff0a63e6:46195 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:36:01,314 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46195-0x10160d3103f0000, quorum=127.0.0.1:49807, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:01,317 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62dc63d8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:01,318 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@424d92de{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:01,318 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:01,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bee76e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:01,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@665e2468{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:01,320 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:36:01,321 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:01,321 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:01,321 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid af0b0e36-61ae-483f-a85d-ba85a4dacb3f) service to localhost/127.0.0.1:39459 2024-11-22T04:36:01,320 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40269,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:33685 , LocalHost:localPort 8fc3ff0a63e6/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-22T04:36:01,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:36561,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2085118315-172.17.0.2-1732250110796 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:01,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40269,null,null], DatanodeInfoWithStorage[127.0.0.1:36561,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40269,null,null], DatanodeInfoWithStorage[127.0.0.1:36561,null,null]] 2024-11-22T04:36:01,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:36561,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2085118315-172.17.0.2-1732250110796 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:01,321 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data3/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:01,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40269,null,null]) java.io.IOException: No block pool offer service for bpid=BP-2085118315-172.17.0.2-1732250110796 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:01,321 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4e2d5c94 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:36561,null,null], DatanodeInfoWithStorage[127.0.0.1:40269,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-2085118315-172.17.0.2-1732250110796:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:36561,null,null], DatanodeInfoWithStorage[127.0.0.1:40269,null,null]] 2024-11-22T04:36:01,322 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data4/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:01,322 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:01,324 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@624ef820{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:01,325 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@12827689{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:01,325 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:01,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@698a5081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:01,325 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@d929f15{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:01,327 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:01,327 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:36:01,327 WARN [BP-2085118315-172.17.0.2-1732250110796 heartbeating to localhost/127.0.0.1:39459 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2085118315-172.17.0.2-1732250110796 (Datanode Uuid 154db80d-b446-4abe-899a-1ee90a38892f) service to localhost/127.0.0.1:39459 2024-11-22T04:36:01,327 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:01,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data5/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:01,328 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/cluster_7e15c870-6603-5c8e-52e7-1a94cfb5ab65/data/data6/current/BP-2085118315-172.17.0.2-1732250110796 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:01,328 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:01,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3feb978b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:36:01,335 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@145c0180{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:01,335 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:01,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7720beab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:01,335 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69bbaec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:01,343 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:36:01,379 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:36:01,388 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 79) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39459 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39459 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:38711 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38711 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39459 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:39459 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:39459 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:39459 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f23ecbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:39459 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39459 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007f23ecbefdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=237 (was 265), ProcessCount=11 (was 11), AvailableMemoryMB=8627 (was 9535) 2024-11-22T04:36:01,395 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=237, ProcessCount=11, AvailableMemoryMB=8626 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.log.dir so I do NOT create it in target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/0537c356-10b9-0e35-f3a1-16ff0161fa75/hadoop.tmp.dir so I do NOT create it in target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9, deleteOnExit=true 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/test.cache.data in system properties and HBase conf 2024-11-22T04:36:01,396 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:36:01,397 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:36:01,397 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:36:01,398 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:36:01,410 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:36:01,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:01,743 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:01,745 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:01,745 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:01,745 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:01,745 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:01,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@140caf6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:01,748 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2835f29c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:01,848 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1fc8bed8{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-37527-hadoop-hdfs-3_4_1-tests_jar-_-any-9504893633855716644/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:36:01,848 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@42aa99e7{HTTP/1.1, (http/1.1)}{localhost:37527} 2024-11-22T04:36:01,848 INFO [Time-limited test {}] server.Server(415): Started @159115ms 2024-11-22T04:36:01,860 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:36:01,864 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:01,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:02,099 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:02,103 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:02,104 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:02,105 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:02,105 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:02,105 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33cf8bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:02,106 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60abc71f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:02,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@60deb4a2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-36135-hadoop-hdfs-3_4_1-tests_jar-_-any-8842153899836616622/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:02,210 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2a75563d{HTTP/1.1, (http/1.1)}{localhost:36135} 2024-11-22T04:36:02,210 INFO [Time-limited test {}] server.Server(415): Started @159477ms 2024-11-22T04:36:02,212 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:02,238 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:02,242 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:02,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:02,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:02,243 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:02,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:02,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:02,358 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62169090{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-35395-hadoop-hdfs-3_4_1-tests_jar-_-any-8765173878681102094/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:02,359 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:35395} 2024-11-22T04:36:02,359 INFO [Time-limited test {}] server.Server(415): Started @159625ms 2024-11-22T04:36:02,360 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:02,865 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:02,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:03,699 WARN [Thread-1204 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data1/current/BP-1454010557-172.17.0.2-1732250161415/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:03,699 WARN [Thread-1205 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data2/current/BP-1454010557-172.17.0.2-1732250161415/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:03,721 WARN [Thread-1168 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:36:03,724 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfd3cbac275d759 with lease ID 0x679c6d678539d935: Processing first storage report for DS-79a70493-8bea-4268-b101-12b2d4d03047 from datanode DatanodeRegistration(127.0.0.1:38721, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=33299, infoSecurePort=0, ipcPort=37607, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415) 2024-11-22T04:36:03,724 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfd3cbac275d759 with lease ID 0x679c6d678539d935: from storage DS-79a70493-8bea-4268-b101-12b2d4d03047 node DatanodeRegistration(127.0.0.1:38721, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=33299, infoSecurePort=0, ipcPort=37607, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:36:03,724 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbfd3cbac275d759 with lease ID 0x679c6d678539d935: Processing first storage report for DS-29b9304f-303e-4913-aaf5-ab3d1dca7d76 from datanode DatanodeRegistration(127.0.0.1:38721, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=33299, infoSecurePort=0, ipcPort=37607, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415) 2024-11-22T04:36:03,724 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbfd3cbac275d759 with lease ID 0x679c6d678539d935: from storage DS-29b9304f-303e-4913-aaf5-ab3d1dca7d76 node DatanodeRegistration(127.0.0.1:38721, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=33299, infoSecurePort=0, ipcPort=37607, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:03,851 WARN [Thread-1216 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data4/current/BP-1454010557-172.17.0.2-1732250161415/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:03,851 WARN [Thread-1215 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data3/current/BP-1454010557-172.17.0.2-1732250161415/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:03,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:03,872 WARN [Thread-1191 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:36:03,874 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d6e70d71a1160d0 with lease ID 0x679c6d678539d936: Processing first storage report for DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f from datanode DatanodeRegistration(127.0.0.1:33615, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=37853, infoSecurePort=0, ipcPort=32835, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415) 2024-11-22T04:36:03,874 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d6e70d71a1160d0 with lease ID 0x679c6d678539d936: from storage DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f node DatanodeRegistration(127.0.0.1:33615, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=37853, infoSecurePort=0, ipcPort=32835, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5d6e70d71a1160d0 with lease ID 0x679c6d678539d936: Processing first storage report for DS-26b223df-cf10-4451-84f4-53c7073d6628 from datanode DatanodeRegistration(127.0.0.1:33615, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=37853, infoSecurePort=0, ipcPort=32835, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415) 2024-11-22T04:36:03,875 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5d6e70d71a1160d0 with lease ID 0x679c6d678539d936: from storage DS-26b223df-cf10-4451-84f4-53c7073d6628 node DatanodeRegistration(127.0.0.1:33615, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=37853, infoSecurePort=0, ipcPort=32835, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:03,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:03,895 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0 2024-11-22T04:36:03,925 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/zookeeper_0, clientPort=54652, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:36:03,926 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54652 2024-11-22T04:36:03,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:03,928 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:36:03,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:36:03,939 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61 with version=8 2024-11-22T04:36:03,939 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:36:03,942 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:36:03,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:03,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:03,942 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:36:03,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:03,942 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:36:03,943 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:36:03,943 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:36:03,943 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36753 2024-11-22T04:36:03,946 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36753 connecting to ZooKeeper ensemble=127.0.0.1:54652 2024-11-22T04:36:03,994 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:367530x0, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:36:03,995 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36753-0x10160d3d67d0000 connected 2024-11-22T04:36:04,082 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:04,083 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:04,085 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:04,086 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61, hbase.cluster.distributed=false 2024-11-22T04:36:04,087 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:36:04,088 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36753 2024-11-22T04:36:04,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36753 2024-11-22T04:36:04,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with 
threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36753 2024-11-22T04:36:04,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36753 2024-11-22T04:36:04,090 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36753 2024-11-22T04:36:04,107 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:36:04,108 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:36:04,109 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34531 2024-11-22T04:36:04,110 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34531 connecting to ZooKeeper ensemble=127.0.0.1:54652 2024-11-22T04:36:04,111 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:04,112 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:04,124 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345310x0, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:36:04,124 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345310x0, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:04,124 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34531-0x10160d3d67d0001 connected 2024-11-22T04:36:04,124 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:36:04,126 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with 
cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:36:04,127 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:36:04,128 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:36:04,136 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34531 2024-11-22T04:36:04,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34531 2024-11-22T04:36:04,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34531 2024-11-22T04:36:04,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34531 2024-11-22T04:36:04,138 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34531 2024-11-22T04:36:04,152 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:36753 2024-11-22T04:36:04,152 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:04,164 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:04,164 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:36:04,176 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,177 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:36:04,177 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for 
/hbase/backup-masters/8fc3ff0a63e6,36753,1732250163942 from backup master directory 2024-11-22T04:36:04,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:04,187 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:04,187 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:36:04,187 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,191 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/hbase.id] with ID: 892275ea-e4c0-497a-bd63-84ae8c7ff0a2 2024-11-22T04:36:04,192 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/.tmp/hbase.id 2024-11-22T04:36:04,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:36:04,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:36:04,198 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/.tmp/hbase.id]:[hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/hbase.id] 2024-11-22T04:36:04,210 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:04,210 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:36:04,211 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-22T04:36:04,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,218 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:36:04,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:36:04,225 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:36:04,226 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:36:04,226 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:04,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:36:04,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:36:04,236 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store 2024-11-22T04:36:04,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:36:04,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:36:04,243 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:04,243 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:36:04,243 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250164243Disabling compacts and flushes for region at 1732250164243Disabling writes for close at 1732250164243Writing region close event to WAL at 1732250164243Closed at 1732250164243 2024-11-22T04:36:04,244 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/.initializing 2024-11-22T04:36:04,244 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,248 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C36753%2C1732250163942, suffix=, logDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942, archiveDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/oldWALs, maxLogs=10 2024-11-22T04:36:04,248 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 2024-11-22T04:36:04,253 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 2024-11-22T04:36:04,257 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33299:33299),(127.0.0.1/127.0.0.1:37853:37853)] 2024-11-22T04:36:04,258 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:04,258 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:04,258 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,258 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,260 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,261 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:36:04,261 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:04,262 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,263 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:36:04,263 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:04,264 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,265 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:36:04,265 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:04,266 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,267 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:36:04,267 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,268 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:04,268 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,269 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,269 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,270 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,271 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,271 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:36:04,272 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:04,275 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:04,275 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=861291, jitterRate=0.09518842399120331}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:36:04,276 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250164258Initializing all the Stores at 1732250164259 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250164259Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250164260 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250164260Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250164260Cleaning up temporary data from old regions at 1732250164271 (+11 ms)Region opened successfully at 1732250164276 (+5 ms) 2024-11-22T04:36:04,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:36:04,279 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61112a37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:36:04,280 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:36:04,280 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:36:04,280 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:36:04,281 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:36:04,281 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:36:04,282 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:36:04,282 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:36:04,286 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:36:04,287 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:36:04,292 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:36:04,292 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:36:04,293 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:36:04,302 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:36:04,303 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:36:04,304 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:36:04,313 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:36:04,314 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:36:04,323 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:36:04,326 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:36:04,334 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:36:04,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:04,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:04,345 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,345 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,36753,1732250163942, sessionid=0x10160d3d67d0000, setting cluster-up flag (Was=false) 2024-11-22T04:36:04,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,397 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:36:04,398 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,418 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,450 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:36:04,451 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:04,452 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:36:04,454 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:04,454 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:36:04,454 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:36:04,454 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,36753,1732250163942 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:36:04,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:04,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:04,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:04,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:04,456 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:36:04,456 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,456 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:36:04,456 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250194457 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:36:04,457 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,458 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:04,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:36:04,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:36:04,458 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:36:04,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:36:04,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:36:04,458 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:36:04,458 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250164458,5,FailOnTimeoutGroup] 2024-11-22T04:36:04,458 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250164458,5,FailOnTimeoutGroup] 2024-11-22T04:36:04,459 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,459 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:36:04,459 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,459 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,459 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,459 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:36:04,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:36:04,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:36:04,470 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:36:04,470 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61 2024-11-22T04:36:04,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:36:04,479 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:04,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:36:04,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:36:04,482 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:36:04,482 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:04,483 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:36:04,484 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:36:04,484 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:04,485 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:36:04,487 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:36:04,487 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:04,487 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:36:04,488 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:36:04,489 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:04,489 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:04,489 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:36:04,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740 2024-11-22T04:36:04,490 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740 2024-11-22T04:36:04,491 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:36:04,492 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:36:04,492 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
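The FlushLargeStoresPolicy records above (for master:store and hbase:meta) note that hbase.hregion.percolumnfamilyflush.size.lower.bound is not present in the table descriptor, so the per-family flush lower bound falls back to the memstore flush size divided by the number of families (16.0 M for hbase:meta). A hedged sketch of supplying that value explicitly as a table-descriptor property; only the config key is taken verbatim from the log, and the table name 'demo' is hypothetical:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Set the per-column-family flush lower bound (in bytes) directly on the
        // table descriptor, so FlushLargeStoresPolicy does not have to derive it.
        TableDescriptor desc = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))            // hypothetical table
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))            // 16 MB, as in the log
            .build();
        System.out.println(desc.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
      }
    }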
2024-11-22T04:36:04,493 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:36:04,496 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:04,496 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747081, jitterRate=-0.05003829300403595}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:36:04,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250164479Initializing all the Stores at 1732250164480 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250164480Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250164480Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250164480Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250164480Cleaning up temporary data from old regions at 1732250164492 (+12 ms)Region opened successfully at 1732250164497 (+5 ms) 2024-11-22T04:36:04,497 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:36:04,497 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:36:04,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:36:04,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:36:04,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:36:04,498 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:36:04,498 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250164497Disabling compacts and flushes for region at 1732250164497Disabling writes for close at 1732250164498 (+1 ms)Writing 
region close event to WAL at 1732250164498Closed at 1732250164498 2024-11-22T04:36:04,500 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:04,500 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:36:04,500 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:36:04,501 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:36:04,502 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:36:04,540 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(746): ClusterId : 892275ea-e4c0-497a-bd63-84ae8c7ff0a2 2024-11-22T04:36:04,540 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:36:04,546 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:36:04,546 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:36:04,556 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:36:04,556 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67d03189, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:36:04,569 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:34531 2024-11-22T04:36:04,569 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:36:04,569 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:36:04,569 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T04:36:04,570 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,36753,1732250163942 with port=34531, startcode=1732250164107 2024-11-22T04:36:04,570 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:36:04,573 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44591, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:36:04,573 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36753 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,573 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36753 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,575 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61 2024-11-22T04:36:04,575 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43513 2024-11-22T04:36:04,575 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:36:04,587 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:36:04,587 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] zookeeper.ZKUtil(111): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,587 WARN [RS:0;8fc3ff0a63e6:34531 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:36:04,587 INFO [RS:0;8fc3ff0a63e6:34531 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:04,587 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,588 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,34531,1732250164107] 2024-11-22T04:36:04,591 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:36:04,592 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:36:04,592 INFO [RS:0;8fc3ff0a63e6:34531 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:36:04,592 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
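Records on this and the surrounding lines show ZKWatcher delivering NodeCreated and NodeChildrenChanged events for znodes such as /hbase/running and /hbase/rs, and ZKUtil arming a watcher on the new regionserver znode. A minimal standalone sketch of the same watch pattern with the plain ZooKeeper client; only the quorum address 127.0.0.1:54652 and the znode paths come from the log, the rest is illustrative:

    import java.util.List;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class HBaseZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        // Quorum address as reported in the log; the session timeout is arbitrary.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54652", 30000, event -> {
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();

        // Watch /hbase/running: a NodeCreated event fires once the active master
        // publishes the cluster-up flag, as seen in the ZKWatcher records above.
        zk.exists("/hbase/running", event ->
            System.out.println("event " + event.getType() + " on " + event.getPath()));

        // List registered regionservers under /hbase/rs and re-arm a children watch.
        List<String> servers = zk.getChildren("/hbase/rs", true);
        System.out.println("live regionservers: " + servers);

        zk.close();
      }
    }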
2024-11-22T04:36:04,593 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:36:04,593 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:36:04,594 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:36:04,594 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,595 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,34531,1732250164107-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:36:04,609 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:36:04,609 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,34531,1732250164107-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,609 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,609 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.Replication(171): 8fc3ff0a63e6,34531,1732250164107 started 2024-11-22T04:36:04,624 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:04,624 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,34531,1732250164107, RpcServer on 8fc3ff0a63e6/172.17.0.2:34531, sessionid=0x10160d3d67d0001 2024-11-22T04:36:04,624 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:36:04,624 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,624 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,34531,1732250164107' 2024-11-22T04:36:04,624 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,34531,1732250164107' 2024-11-22T04:36:04,625 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:36:04,626 DEBUG 
[RS:0;8fc3ff0a63e6:34531 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:36:04,626 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:36:04,626 INFO [RS:0;8fc3ff0a63e6:34531 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:36:04,626 INFO [RS:0;8fc3ff0a63e6:34531 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:36:04,653 WARN [8fc3ff0a63e6:36753 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:36:04,728 INFO [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C34531%2C1732250164107, suffix=, logDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107, archiveDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs, maxLogs=32 2024-11-22T04:36:04,730 INFO [RS:0;8fc3ff0a63e6:34531 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:04,741 INFO [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:04,742 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33299:33299),(127.0.0.1/127.0.0.1:37853:37853)] 2024-11-22T04:36:04,866 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:04,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:04,903 DEBUG [8fc3ff0a63e6:36753 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:36:04,903 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:04,905 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,34531,1732250164107, state=OPENING 2024-11-22T04:36:04,913 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:36:04,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,923 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:04,924 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:36:04,924 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,34531,1732250164107}] 2024-11-22T04:36:04,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:04,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:05,078 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:36:05,080 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37873, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:36:05,088 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:36:05,088 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:05,091 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C34531%2C1732250164107.meta, suffix=.meta, logDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107, archiveDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs, maxLogs=32 2024-11-22T04:36:05,092 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta 2024-11-22T04:36:05,097 INFO 
[RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta 2024-11-22T04:36:05,098 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37853:37853),(127.0.0.1/127.0.0.1:33299:33299)] 2024-11-22T04:36:05,098 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:36:05,099 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:36:05,099 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:36:05,100 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:36:05,101 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:36:05,101 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:05,102 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:36:05,103 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:36:05,103 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,103 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:05,104 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:36:05,104 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:36:05,104 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,105 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:05,105 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:36:05,106 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:36:05,106 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,107 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:05,107 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:36:05,108 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740 2024-11-22T04:36:05,109 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740 2024-11-22T04:36:05,110 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:36:05,110 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:36:05,111 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
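The FlushLargeStoresPolicy record above falls back to dividing the region's memstore flush heap size by the number of column families whenever hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor; with the four hbase:meta families (info, ns, rep_barrier, table) that yields the 16.0 M lower bound echoed shortly afterwards as flushSizeLowerBound=16777216. A minimal sketch of that arithmetic, assuming a 64 MB flush heap size inferred from the logged result rather than read from the test configuration:

    public class FlushLowerBoundSketch {
        public static void main(String[] args) {
            // Assumption: 64 MB memstore flush heap size (implied by the 16.0 M result in the log).
            long memStoreFlushHeapSize = 64L * 1024 * 1024;
            int familyCount = 4; // info, ns, rep_barrier, table
            // Fallback applied when no per-column-family lower bound is configured.
            long flushSizeLowerBound = memStoreFlushHeapSize / familyCount;
            System.out.println(flushSizeLowerBound); // 16777216 bytes, i.e. "16.0 M"
        }
    }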
2024-11-22T04:36:05,112 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:36:05,113 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=731754, jitterRate=-0.06952786445617676}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:36:05,113 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:36:05,114 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250165099Writing region info on filesystem at 1732250165099Initializing all the Stores at 1732250165100 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250165100Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250165100Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250165100Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250165100Cleaning up temporary data from old regions at 1732250165110 (+10 ms)Running coprocessor post-open hooks at 1732250165113 (+3 ms)Region opened successfully at 1732250165114 (+1 ms) 2024-11-22T04:36:05,115 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250165077 2024-11-22T04:36:05,118 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:36:05,118 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:36:05,119 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:05,120 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,34531,1732250164107, state=OPEN 2024-11-22T04:36:05,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:36:05,199 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:36:05,199 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:05,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:05,199 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:05,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:36:05,204 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,34531,1732250164107 in 275 msec 2024-11-22T04:36:05,208 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:36:05,209 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 704 msec 2024-11-22T04:36:05,210 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:05,210 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:36:05,211 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:36:05,211 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,34531,1732250164107, seqNum=-1] 2024-11-22T04:36:05,212 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:36:05,213 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43367, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:36:05,219 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 765 msec 2024-11-22T04:36:05,219 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250165219, completionTime=-1 2024-11-22T04:36:05,219 INFO 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:36:05,219 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:36:05,221 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:36:05,221 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250225221 2024-11-22T04:36:05,221 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250285221 2024-11-22T04:36:05,221 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:36753, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,225 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.040sec 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
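The "Setting hbase:meta replicaId=0 location in ZooKeeper ... state=OPEN" record and the two ZKWatcher events above show the master publishing the meta location to the /hbase/meta-region-server znode and both watchers being notified of the resulting NodeDataChanged event. The sketch below illustrates the underlying ZooKeeper mechanism with a plain client; HBase itself goes through its internal ZKWatcher and MetaTableLocator classes, and the quorum address 127.0.0.1:54652 is simply the one printed in the log:

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MetaZnodeWatchSketch {
        public static void main(String[] args) throws Exception {
            Watcher watcher = new Watcher() {
                @Override
                public void process(WatchedEvent event) {
                    // The log records exactly this event type arriving for /hbase/meta-region-server.
                    if (event.getType() == Event.EventType.NodeDataChanged) {
                        System.out.println("meta location changed: " + event.getPath());
                    }
                }
            };
            ZooKeeper zk = new ZooKeeper("127.0.0.1:54652", 30000, watcher);
            Stat stat = new Stat();
            // Reads the serialized server name the master wrote; 'true' re-registers the watch.
            byte[] data = zk.getData("/hbase/meta-region-server", true, stat);
            System.out.println("znode holds " + data.length + " bytes, version=" + stat.getVersion());
            zk.close();
        }
    }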
2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:36:05,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:36:05,230 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:36:05,230 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:36:05,231 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,36753,1732250163942-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:05,241 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@117b424d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:05,241 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,36753,-1 for getting cluster id 2024-11-22T04:36:05,241 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:36:05,243 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '892275ea-e4c0-497a-bd63-84ae8c7ff0a2' 2024-11-22T04:36:05,243 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:36:05,243 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "892275ea-e4c0-497a-bd63-84ae8c7ff0a2" 2024-11-22T04:36:05,244 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@13901fe3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:05,244 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,36753,-1] 2024-11-22T04:36:05,244 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:36:05,244 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:05,246 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45668, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:36:05,247 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10e70018, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:05,248 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:36:05,249 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,34531,1732250164107, seqNum=-1] 2024-11-22T04:36:05,250 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:36:05,252 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40500, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:36:05,254 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:05,254 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:05,257 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:36:05,257 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-22T04:36:05,258 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-22T04:36:05,258 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T04:36:05,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:05,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4f16672b 2024-11-22T04:36:05,259 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T04:36:05,261 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45680, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T04:36:05,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T04:36:05,262 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
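The two TableDescriptorChecker warnings above appear because the test deliberately runs with a region max file size of 786432 bytes and a memstore flush size of 8192 bytes, small enough to force frequent flushes and log rolls. A sketch of how such values can be placed on an HBase configuration before starting a cluster follows; the keys and numbers are the ones printed in the warnings, but the actual TestLogRolling setup may apply them through a different path (for instance directly on the table descriptor):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionConfigSketch {
        public static void main(String[] args) {
            Configuration conf = HBaseConfiguration.create();
            // Values copied from the warnings above; both are far below the production defaults.
            conf.setLong("hbase.hregion.max.filesize", 786432L);
            conf.setLong("hbase.hregion.memstore.flush.size", 8192L);
            System.out.println(conf.getLong("hbase.hregion.max.filesize", -1L));
            System.out.println(conf.getLong("hbase.hregion.memstore.flush.size", -1L));
        }
    }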
2024-11-22T04:36:05,262 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:36:05,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T04:36:05,265 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T04:36:05,265 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,265 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-22T04:36:05,266 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:36:05,266 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T04:36:05,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741835_1011 (size=395) 2024-11-22T04:36:05,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741835_1011 (size=395) 2024-11-22T04:36:05,275 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 4c8f356c450202ce6e8c94845d9dc992, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61 2024-11-22T04:36:05,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33615 is added to blk_1073741836_1012 (size=78) 2024-11-22T04:36:05,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38721 is added to blk_1073741836_1012 (size=78) 2024-11-22T04:36:05,290 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:05,290 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 4c8f356c450202ce6e8c94845d9dc992, disabling compactions & flushes 2024-11-22T04:36:05,290 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,290 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,290 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. after waiting 0 ms 2024-11-22T04:36:05,290 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,290 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,290 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 4c8f356c450202ce6e8c94845d9dc992: Waiting for close lock at 1732250165290Disabling compacts and flushes for region at 1732250165290Disabling writes for close at 1732250165290Writing region close event to WAL at 1732250165290Closed at 1732250165290 2024-11-22T04:36:05,292 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T04:36:05,292 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732250165292"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250165292"}]},"ts":"1732250165292"} 2024-11-22T04:36:05,294 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
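The create 'TestLogRolling-testLogRollOnPipelineRestart' request and the CreateTableProcedure states above are the master-side view of a client createTable call for a table with a single 'info' family limited to one version. The sketch below is an illustrative reconstruction of such a call with the current HBase client API, not the test's own code; it assumes a reachable cluster configured through hbase-site.xml:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection connection = ConnectionFactory.createConnection(conf);
                 Admin admin = connection.getAdmin()) {
                TableDescriptor descriptor = TableDescriptorBuilder
                    .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"))
                    .setColumnFamily(ColumnFamilyDescriptorBuilder
                        .newBuilder(Bytes.toBytes("info"))
                        .setMaxVersions(1) // VERSIONS => '1' in the descriptor logged above
                        .build())
                    .build();
                // Triggers the pid=4 CreateTableProcedure sequence recorded in the master log.
                admin.createTable(descriptor);
            }
        }
    }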
2024-11-22T04:36:05,296 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T04:36:05,296 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250165296"}]},"ts":"1732250165296"} 2024-11-22T04:36:05,298 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-22T04:36:05,299 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4c8f356c450202ce6e8c94845d9dc992, ASSIGN}] 2024-11-22T04:36:05,300 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4c8f356c450202ce6e8c94845d9dc992, ASSIGN 2024-11-22T04:36:05,301 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4c8f356c450202ce6e8c94845d9dc992, ASSIGN; state=OFFLINE, location=8fc3ff0a63e6,34531,1732250164107; forceNewPlan=false, retain=false 2024-11-22T04:36:05,452 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c8f356c450202ce6e8c94845d9dc992, regionState=OPENING, regionLocation=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:05,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4c8f356c450202ce6e8c94845d9dc992, ASSIGN because future has completed 2024-11-22T04:36:05,457 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c8f356c450202ce6e8c94845d9dc992, server=8fc3ff0a63e6,34531,1732250164107}] 2024-11-22T04:36:05,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:36:05,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,519 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,520 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,521 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,525 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,528 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:05,615 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,615 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 4c8f356c450202ce6e8c94845d9dc992, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:05,616 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,616 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:05,616 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,616 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,617 INFO [StoreOpener-4c8f356c450202ce6e8c94845d9dc992-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,619 INFO [StoreOpener-4c8f356c450202ce6e8c94845d9dc992-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4c8f356c450202ce6e8c94845d9dc992 columnFamilyName info 2024-11-22T04:36:05,619 DEBUG [StoreOpener-4c8f356c450202ce6e8c94845d9dc992-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:05,620 INFO [StoreOpener-4c8f356c450202ce6e8c94845d9dc992-1 {}] regionserver.HStore(327): Store=4c8f356c450202ce6e8c94845d9dc992/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:05,620 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,621 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,621 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,622 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,622 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,624 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,626 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:05,626 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 4c8f356c450202ce6e8c94845d9dc992; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728717, jitterRate=-0.07338957488536835}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:36:05,627 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor 
post-open hooks for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:05,628 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 4c8f356c450202ce6e8c94845d9dc992: Running coprocessor pre-open hook at 1732250165616Writing region info on filesystem at 1732250165616Initializing all the Stores at 1732250165617 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250165617Cleaning up temporary data from old regions at 1732250165622 (+5 ms)Running coprocessor post-open hooks at 1732250165627 (+5 ms)Region opened successfully at 1732250165627 2024-11-22T04:36:05,629 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992., pid=6, masterSystemTime=1732250165611 2024-11-22T04:36:05,631 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,631 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:05,632 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=4c8f356c450202ce6e8c94845d9dc992, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:05,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4c8f356c450202ce6e8c94845d9dc992, server=8fc3ff0a63e6,34531,1732250164107 because future has completed 2024-11-22T04:36:05,638 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T04:36:05,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 4c8f356c450202ce6e8c94845d9dc992, server=8fc3ff0a63e6,34531,1732250164107 in 179 msec 2024-11-22T04:36:05,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T04:36:05,641 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=4c8f356c450202ce6e8c94845d9dc992, ASSIGN in 340 msec 2024-11-22T04:36:05,642 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T04:36:05,643 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put 
{"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250165642"}]},"ts":"1732250165642"} 2024-11-22T04:36:05,645 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-22T04:36:05,646 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T04:36:05,649 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 384 msec 2024-11-22T04:36:05,867 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:05,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:06,868 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:06,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:07,869 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:07,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:08,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:08,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:09,870 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:09,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:09,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:36:09,997 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-22T04:36:09,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T04:36:09,998 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-22T04:36:09,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:36:09,998 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-22T04:36:09,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T04:36:09,998 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-22T04:36:10,601 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:36:10,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,626 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,627 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,632 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:10,642 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T04:36:10,642 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-22T04:36:10,871 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:10,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:11,872 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:11,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:12,873 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:12,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:13,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:13,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:14,874 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:14,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:15,327 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36753 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:36:15,328 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-22T04:36:15,328 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-22T04:36:15,332 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T04:36:15,332 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:15,338 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992., hostname=8fc3ff0a63e6,34531,1732250164107, seqNum=2] 2024-11-22T04:36:15,875 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:15,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:16,876 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:16,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:17,341 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:17,342 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,342 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,342 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK], DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]) is bad. 2024-11-22T04:36:17,342 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK], DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]) is bad. 
2024-11-22T04:36:17,342 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,342 WARN [PacketResponder: BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33615] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,343 WARN [PacketResponder: BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:33615] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,343 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:51410 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51410 dst: /127.0.0.1:33615 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:17,343 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK], DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:33615,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]) is bad. 2024-11-22T04:36:17,343 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-14051178_22 at /127.0.0.1:40382 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40382 dst: /127.0.0.1:38721 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,343 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:40424 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40424 dst: /127.0.0.1:38721 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] 
at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,344 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:40438 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40438 dst: /127.0.0.1:38721 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,343 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:51404 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51404 dst: /127.0.0.1:33615 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,344 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-14051178_22 at /127.0.0.1:51376 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33615:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51376 dst: /127.0.0.1:33615 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,424 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62169090{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:17,425 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6b2d1260{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:17,425 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:17,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@392000f9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:17,425 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@c07ac8c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:17,427 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:17,427 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:36:17,427 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:17,427 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid aa48f74c-9c08-4611-b09b-ea41fb154334) service to localhost/127.0.0.1:43513 2024-11-22T04:36:17,427 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data3/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:17,428 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data4/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:17,428 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:17,438 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:17,444 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:17,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:17,445 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:17,445 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:17,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5036ecf5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:17,446 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6d5ab907{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:17,550 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7939cb3e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-44321-hadoop-hdfs-3_4_1-tests_jar-_-any-7807469895415855827/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:17,550 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@3146549d{HTTP/1.1, (http/1.1)}{localhost:44321} 2024-11-22T04:36:17,551 INFO [Time-limited test {}] server.Server(415): Started @174817ms 2024-11-22T04:36:17,552 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:17,574 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,574 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,574 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:17,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:55924 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55924 dst: /127.0.0.1:38721 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,575 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:55926 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55926 dst: /127.0.0.1:38721 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:17,576 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-14051178_22 at /127.0.0.1:55936 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:38721:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55936 dst: /127.0.0.1:38721 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:17,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@60deb4a2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:17,577 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2a75563d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:17,577 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:17,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60abc71f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:17,577 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33cf8bc2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:17,578 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:17,578 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:36:17,578 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid 46de8883-cfbb-4e8c-8037-ed77d701058e) service to localhost/127.0.0.1:43513 2024-11-22T04:36:17,578 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:17,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data1/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:17,579 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data2/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:17,579 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:17,586 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:17,589 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:17,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:17,590 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:17,590 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:36:17,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ed6e4b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:17,591 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@168cc4fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:17,694 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7269a538{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-40223-hadoop-hdfs-3_4_1-tests_jar-_-any-6173115106689616734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:17,694 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ec1c56{HTTP/1.1, (http/1.1)}{localhost:40223} 2024-11-22T04:36:17,694 INFO [Time-limited test {}] server.Server(415): Started @174961ms 2024-11-22T04:36:17,695 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:17,877 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:17,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:18,169 WARN [Thread-1340 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:36:18,172 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56c83de4584c03a5 with lease ID 0x679c6d678539d937: from storage DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f node DatanodeRegistration(127.0.0.1:35057, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=44565, infoSecurePort=0, ipcPort=38049, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:18,172 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56c83de4584c03a5 with lease ID 0x679c6d678539d937: from storage DS-26b223df-cf10-4451-84f4-53c7073d6628 node DatanodeRegistration(127.0.0.1:35057, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=44565, infoSecurePort=0, ipcPort=38049, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:18,315 WARN [Thread-1360 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:36:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf01dbdacb4207d6a with lease ID 0x679c6d678539d938: from storage DS-79a70493-8bea-4268-b101-12b2d4d03047 node DatanodeRegistration(127.0.0.1:46557, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=42039, infoSecurePort=0, ipcPort=45521, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:18,318 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf01dbdacb4207d6a with lease ID 0x679c6d678539d938: from storage DS-29b9304f-303e-4913-aaf5-ab3d1dca7d76 node DatanodeRegistration(127.0.0.1:46557, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=42039, infoSecurePort=0, ipcPort=45521, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:18,743 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-22T04:36:18,746 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-22T04:36:18,748 ERROR [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:18,748 WARN [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:18,748 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107:(num 1732250164729) roll requested 2024-11-22T04:36:18,749 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:18,758 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 newFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:18,759 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:18,759 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:18,759 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:18,759 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:18,759 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:18,759 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:18,759 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:18,760 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:18,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:18,760 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44565:44565),(127.0.0.1/127.0.0.1:42039:42039)] 2024-11-22T04:36:18,760 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 is not closed yet, will try archiving it next time 2024-11-22T04:36:18,760 WARN [IPC Server handler 1 on default port 43513 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-22T04:36:18,760 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 after 0ms 2024-11-22T04:36:18,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:18,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:19,878 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:19,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:20,172 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T04:36:20,765 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-22T04:36:20,879 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:20,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:21,880 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:21,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:22,761 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 after 4001ms 2024-11-22T04:36:22,769 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:46557,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:22,769 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35057,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK], DatanodeInfoWithStorage[127.0.0.1:46557,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:46557,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]) is bad. 
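"Failed to recover lease, attempt=0 ... after 0ms" followed about four seconds later by "Recovered lease, attempt=1 ... after 4001ms" is the usual recover-and-poll loop on the old WAL file. A simplified sketch of that pattern against the public DistributedFileSystem API; the real implementation is HBase's RecoverLeaseFSUtils, and the class name, timeout, and pause below are illustrative:

```java
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch of the retry pattern visible in the log: ask the NameNode to recover
// the lease on a WAL file and poll until the file is reported closed.
public final class LeaseRecoverySketch {

  public static boolean recoverWithRetries(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // recoverLease() returns true once the NameNode considers the file closed.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      try {
        // Between attempts the file can be polled; isFileClosed() throws
        // "Filesystem closed" if the client was shut down, as seen in this log.
        if (dfs.isFileClosed(wal)) {
          return true;
        }
      } catch (IOException e) {
        // Failed invocation; fall through and retry after a pause.
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }
}
```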
2024-11-22T04:36:22,769 WARN [PacketResponder: BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:46557] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:36:22,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:55368 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55368 dst: /127.0.0.1:35057 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:22,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:55046 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:46557:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55046 dst: /127.0.0.1:46557 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:22,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7269a538{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:22,806 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ec1c56{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:22,806 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:22,806 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@168cc4fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:22,807 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ed6e4b8{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:22,808 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:22,808 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:36:22,808 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid 46de8883-cfbb-4e8c-8037-ed77d701058e) service to localhost/127.0.0.1:43513 2024-11-22T04:36:22,808 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:22,808 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data1/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:22,809 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data2/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:22,809 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:22,822 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:22,826 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:22,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:22,829 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:22,830 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:36:22,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f0690d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:22,830 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10b7c71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:22,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-22T04:36:22,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:22,931 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66269315{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-42389-hadoop-hdfs-3_4_1-tests_jar-_-any-2260494268044787434/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:22,932 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bc7279c{HTTP/1.1, (http/1.1)}{localhost:42389} 2024-11-22T04:36:22,932 INFO [Time-limited test {}] server.Server(415): Started @180198ms 2024-11-22T04:36:22,933 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-22T04:36:22,950 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:22,951 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-566302414_22 at /127.0.0.1:42440 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35057:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42440 dst: /127.0.0.1:35057 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:22,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7939cb3e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:22,953 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3146549d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:22,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:22,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6d5ab907{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:22,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5036ecf5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:22,954 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:22,954 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid aa48f74c-9c08-4611-b09b-ea41fb154334) service to localhost/127.0.0.1:43513 2024-11-22T04:36:22,954 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-22T04:36:22,954 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:22,955 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data3/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:22,955 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data4/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:22,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:22,968 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:22,976 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:22,977 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:22,977 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:22,977 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:22,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@278dfe13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:22,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2a260e23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:23,088 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23b7049e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/java.io.tmpdir/jetty-localhost-36785-hadoop-hdfs-3_4_1-tests_jar-_-any-13422794303226501583/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:23,089 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@72d2e6e7{HTTP/1.1, (http/1.1)}{localhost:36785} 2024-11-22T04:36:23,089 INFO [Time-limited test {}] server.Server(415): Started @180355ms 2024-11-22T04:36:23,090 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:23,516 WARN [Thread-1414 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:36:23,518 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bbfd5d9aa22a39a with lease ID 0x679c6d678539d939: from storage DS-79a70493-8bea-4268-b101-12b2d4d03047 node DatanodeRegistration(127.0.0.1:46443, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=39723, infoSecurePort=0, ipcPort=40003, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:23,519 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2bbfd5d9aa22a39a with lease ID 0x679c6d678539d939: from storage DS-29b9304f-303e-4913-aaf5-ab3d1dca7d76 node DatanodeRegistration(127.0.0.1:46443, datanodeUuid=46de8883-cfbb-4e8c-8037-ed77d701058e, infoPort=39723, infoSecurePort=0, ipcPort=40003, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:23,611 WARN [Thread-1434 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:36:23,613 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeba72f72c4e7c1e with lease ID 0x679c6d678539d93a: from storage DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f node DatanodeRegistration(127.0.0.1:40899, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=38171, infoSecurePort=0, ipcPort=45321, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:23,613 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xaeba72f72c4e7c1e with lease ID 0x679c6d678539d93a: from storage DS-26b223df-cf10-4451-84f4-53c7073d6628 node DatanodeRegistration(127.0.0.1:40899, datanodeUuid=aa48f74c-9c08-4611-b09b-ea41fb154334, infoPort=38171, infoSecurePort=0, ipcPort=45321, storageInfo=lv=-57;cid=testClusterID;nsid=1740501497;c=1732250161415), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:23,881 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:23,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:24,107 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-22T04:36:24,110 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-22T04:36:24,112 ERROR [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35057,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:24,112 WARN [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35057,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
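"Validated row row1004" right after "Data Nodes restarted" is the test's basic round-trip check: a row written before the datanodes were bounced must still read back correctly afterwards. A standalone, hypothetical version of that check with the ordinary client API; the table name, column family, and qualifier below are assumptions, not taken from the test:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical round-trip check in the spirit of "Validated row rowNNNN":
// write a row, (restart the datanodes out of band), then read it back and compare.
public final class ValidateRowSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    byte[] family = Bytes.toBytes("info");   // assumed column family
    byte[] qualifier = Bytes.toBytes("q");   // assumed qualifier
    byte[] row = Bytes.toBytes("row1004");
    byte[] value = Bytes.toBytes("value1004");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(TableName.valueOf("testLogRolling"))) { // assumed table
      table.put(new Put(row).addColumn(family, qualifier, value));
      // ... datanodes restarted here in the actual test ...
      Result r = table.get(new Get(row));
      boolean ok = Bytes.equals(value, r.getValue(family, qualifier));
      System.out.println("Validated row row1004: " + ok);
    }
  }
}
```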
2024-11-22T04:36:24,112 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107:(num 1732250178748) roll requested 2024-11-22T04:36:24,113 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:24,120 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 newFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:24,120 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:24,120 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:24,120 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:24,120 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:24,120 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:24,120 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:24,120 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35057,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:24,121 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35057,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:24,121 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:24,121 WARN [IPC Server handler 4 on default port 43513 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-22T04:36:24,122 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 after 1ms 2024-11-22T04:36:24,132 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38171:38171),(127.0.0.1/127.0.0.1:39723:39723)] 2024-11-22T04:36:24,133 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 is not closed yet, will try archiving it next time 2024-11-22T04:36:24,882 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:24,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:25,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:25,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:26,134 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:26,145 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 newFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:26,145 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:26,146 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:26,146 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:26,146 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:26,146 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:26,146 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:26,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741838_1019 (size=1264) 2024-11-22T04:36:26,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741838_1019 (size=1264) 2024-11-22T04:36:26,149 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 is not closed yet, will try archiving it next time 2024-11-22T04:36:26,153 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39723:39723),(127.0.0.1/127.0.0.1:38171:38171)] 2024-11-22T04:36:26,153 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 is not closed yet, will try archiving it next time 2024-11-22T04:36:26,153 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:26,153 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file 
hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:26,154 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 after 1ms 2024-11-22T04:36:26,154 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:26,163 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732250165628/Put/vlen=218/seqid=0] 2024-11-22T04:36:26,163 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732250175339/Put/vlen=1045/seqid=0] 2024-11-22T04:36:26,164 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250164729 2024-11-22T04:36:26,164 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:26,164 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:26,164 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 after 0ms 2024-11-22T04:36:26,164 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:26,168 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732250178747/Put/vlen=1045/seqid=0] 2024-11-22T04:36:26,168 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732250180766/Put/vlen=1045/seqid=0] 2024-11-22T04:36:26,168 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 2024-11-22T04:36:26,168 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:26,168 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:26,169 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on 
file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 after 1ms 2024-11-22T04:36:26,169 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250184113 2024-11-22T04:36:26,172 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732250184112/Put/vlen=1045/seqid=0] 2024-11-22T04:36:26,172 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:26,172 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:26,173 WARN [IPC Server handler 0 on default port 43513 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-22T04:36:26,173 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 after 1ms 2024-11-22T04:36:26,518 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-22T04:36:26,619 WARN [ResponseProcessor for block BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:26,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-14051178_22 at /127.0.0.1:56422 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:46443:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56422 dst: /127.0.0.1:46443 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:46443 remote=/127.0.0.1:56422]. Total timeout mills is 60000, 59525 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:26,619 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 block BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46443,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK], DatanodeInfoWithStorage[127.0.0.1:40899,DS-ef2f3a9e-d3cc-4636-943c-e486e8bd5b2f,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:46443,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]) is bad. 2024-11-22T04:36:26,619 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-14051178_22 at /127.0.0.1:55982 [Receiving block BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40899:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55982 dst: /127.0.0.1:40899 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:36:26,620 WARN [DataStreamer for file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 block BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:26,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741839_1022 (size=85) 2024-11-22T04:36:26,883 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:26,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:27,884 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:27,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:28,123 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250178748 after 4002ms 2024-11-22T04:36:28,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:28,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:29,885 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:29,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:30,174 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 after 4002ms 2024-11-22T04:36:30,174 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:30,178 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:30,179 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-22T04:36:30,179 ERROR [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,179 WARN [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,179 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107.meta:.meta(num 1732250165092) roll requested 2024-11-22T04:36:30,180 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250190180.meta 2024-11-22T04:36:30,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,186 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250190180.meta 2024-11-22T04:36:30,189 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,190 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta 2024-11-22T04:36:30,190 WARN [IPC Server handler 4 on default port 43513 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1013 2024-11-22T04:36:30,190 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta after 0ms 2024-11-22T04:36:30,197 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:38171:38171),(127.0.0.1/127.0.0.1:39723:39723)] 2024-11-22T04:36:30,198 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta is not closed yet, will try archiving it next time 2024-11-22T04:36:30,214 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/info/8bdbb625a7e442f2892ecc890b57128d is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992./info:regioninfo/1732250165632/Put/seqid=0 2024-11-22T04:36:30,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741841_1025 (size=7125) 2024-11-22T04:36:30,220 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741841_1025 (size=7125) 2024-11-22T04:36:30,220 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/info/8bdbb625a7e442f2892ecc890b57128d 2024-11-22T04:36:30,242 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/ns/7417171b19bc488fb1e1df459006df02 is 43, key is default/ns:d/1732250165213/Put/seqid=0 2024-11-22T04:36:30,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:46443 is added to blk_1073741842_1026 (size=5153) 2024-11-22T04:36:30,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741842_1026 (size=5153) 2024-11-22T04:36:30,247 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/ns/7417171b19bc488fb1e1df459006df02 2024-11-22T04:36:30,268 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/table/63a12804738246c595778ac451361169 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732250165642/Put/seqid=0 2024-11-22T04:36:30,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741843_1027 (size=5438) 2024-11-22T04:36:30,274 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/table/63a12804738246c595778ac451361169 2024-11-22T04:36:30,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741843_1027 (size=5438) 2024-11-22T04:36:30,280 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/info/8bdbb625a7e442f2892ecc890b57128d as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/info/8bdbb625a7e442f2892ecc890b57128d 2024-11-22T04:36:30,286 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/info/8bdbb625a7e442f2892ecc890b57128d, entries=10, sequenceid=11, filesize=7.0 K 2024-11-22T04:36:30,287 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/ns/7417171b19bc488fb1e1df459006df02 as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/ns/7417171b19bc488fb1e1df459006df02 2024-11-22T04:36:30,293 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/ns/7417171b19bc488fb1e1df459006df02, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T04:36:30,294 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/.tmp/table/63a12804738246c595778ac451361169 as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/table/63a12804738246c595778ac451361169 2024-11-22T04:36:30,301 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/table/63a12804738246c595778ac451361169, entries=2, sequenceid=11, filesize=5.3 K 2024-11-22T04:36:30,302 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 124ms, sequenceid=11, compaction requested=false 2024-11-22T04:36:30,303 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T04:36:30,303 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 4c8f356c450202ce6e8c94845d9dc992 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-22T04:36:30,303 ERROR [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,303 WARN [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61-prefix:8fc3ff0a63e6,34531,1732250164107 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,304 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107:(num 1732250186134) roll requested 2024-11-22T04:36:30,304 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C34531%2C1732250164107.1732250190304 2024-11-22T04:36:30,309 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 newFile=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250190304 2024-11-22T04:36:30,309 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,309 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,309 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,309 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,309 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,310 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250190304 2024-11-22T04:36:30,310 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:30,310 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1454010557-172.17.0.2-1732250161415:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:30,310 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:30,311 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 after 1ms 2024-11-22T04:36:30,314 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 to hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs/8fc3ff0a63e6%2C34531%2C1732250164107.1732250186134 2024-11-22T04:36:30,314 DEBUG [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39723:39723),(127.0.0.1/127.0.0.1:38171:38171)] 2024-11-22T04:36:30,335 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/.tmp/info/bd2e6ddc78b147c5ba16bb8de45d85f8 is 1080, key is row1002/info:/1732250175339/Put/seqid=0 2024-11-22T04:36:30,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741845_1029 (size=9270) 2024-11-22T04:36:30,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741845_1029 (size=9270) 2024-11-22T04:36:30,340 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/.tmp/info/bd2e6ddc78b147c5ba16bb8de45d85f8 2024-11-22T04:36:30,346 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/.tmp/info/bd2e6ddc78b147c5ba16bb8de45d85f8 as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/info/bd2e6ddc78b147c5ba16bb8de45d85f8 2024-11-22T04:36:30,352 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/info/bd2e6ddc78b147c5ba16bb8de45d85f8, entries=4, sequenceid=8, filesize=9.1 K 2024-11-22T04:36:30,354 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 4c8f356c450202ce6e8c94845d9dc992 in 50ms, sequenceid=8, compaction requested=false 2024-11-22T04:36:30,354 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 
4c8f356c450202ce6e8c94845d9dc992: 2024-11-22T04:36:30,359 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:36:30,360 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T04:36:30,360 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:36:30,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:30,360 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): 
Stopping rpc client 2024-11-22T04:36:30,360 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T04:36:30,360 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:36:30,360 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1645668959, stopped=false 2024-11-22T04:36:30,360 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,36753,1732250163942 2024-11-22T04:36:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:30,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:30,417 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:36:30,417 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:36:30,417 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:36:30,417 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:30,417 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:30,417 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,34531,1732250164107' ***** 2024-11-22T04:36:30,417 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:36:30,418 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(3091): Received CLOSE for 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:34531. 2024-11-22T04:36:30,418 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 4c8f356c450202ce6e8c94845d9dc992, disabling compactions & flushes 2024-11-22T04:36:30,418 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:36:30,418 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 
2024-11-22T04:36:30,418 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:30,418 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:30,418 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. after waiting 0 ms 2024-11-22T04:36:30,418 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:36:30,418 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:36:30,419 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T04:36:30,419 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:30,419 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:36:30,419 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T04:36:30,419 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 4c8f356c450202ce6e8c94845d9dc992=TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992.} 2024-11-22T04:36:30,419 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 4c8f356c450202ce6e8c94845d9dc992 2024-11-22T04:36:30,419 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:36:30,419 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:36:30,419 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:36:30,419 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:36:30,419 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:36:30,423 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/default/TestLogRolling-testLogRollOnPipelineRestart/4c8f356c450202ce6e8c94845d9dc992/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-22T04:36:30,423 DEBUG 
[RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T04:36:30,424 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:30,424 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 4c8f356c450202ce6e8c94845d9dc992: Waiting for close lock at 1732250190418Running coprocessor pre-close hooks at 1732250190418Disabling compacts and flushes for region at 1732250190418Disabling writes for close at 1732250190418Writing region close event to WAL at 1732250190419 (+1 ms)Running coprocessor post-close hooks at 1732250190424 (+5 ms)Closed at 1732250190424 2024-11-22T04:36:30,424 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732250165262.4c8f356c450202ce6e8c94845d9dc992. 2024-11-22T04:36:30,424 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:36:30,424 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:36:30,424 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250190419Running coprocessor pre-close hooks at 1732250190419Disabling compacts and flushes for region at 1732250190419Disabling writes for close at 1732250190419Writing region close event to WAL at 1732250190420 (+1 ms)Running coprocessor post-close hooks at 1732250190424 (+4 ms)Closed at 1732250190424 2024-11-22T04:36:30,424 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:36:30,595 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-22T04:36:30,595 INFO [regionserver/8fc3ff0a63e6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-22T04:36:30,596 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:36:30,619 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,34531,1732250164107; all regions closed. 
2024-11-22T04:36:30,620 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,620 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,620 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,620 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,620 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:30,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741840_1023 (size=825) 2024-11-22T04:36:30,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741840_1023 (size=825) 2024-11-22T04:36:30,886 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:30,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:31,887 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:31,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:32,613 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T04:36:32,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:32,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:33,888 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:33,894 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-22T04:36:33,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:34,191 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta after 4001ms 2024-11-22T04:36:34,192 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/WALs/8fc3ff0a63e6,34531,1732250164107/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta to hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs/8fc3ff0a63e6%2C34531%2C1732250164107.meta.1732250165092.meta 2024-11-22T04:36:34,194 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs 2024-11-22T04:36:34,194 INFO [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107.meta:.meta(num 1732250190180) 2024-11-22T04:36:34,195 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,195 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,195 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,195 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,195 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741844_1028 (size=1162) 2024-11-22T04:36:34,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741844_1028 (size=1162) 2024-11-22T04:36:34,202 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs 2024-11-22T04:36:34,202 INFO [RS:0;8fc3ff0a63e6:34531 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C34531%2C1732250164107:(num 1732250190304) 2024-11-22T04:36:34,202 DEBUG [RS:0;8fc3ff0a63e6:34531 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:34,202 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:36:34,202 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:36:34,202 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-22T04:36:34,202 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:36:34,202 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:36:34,203 INFO [RS:0;8fc3ff0a63e6:34531 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34531 2024-11-22T04:36:34,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,34531,1732250164107 2024-11-22T04:36:34,248 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:36:34,248 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:36:34,258 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,34531,1732250164107] 2024-11-22T04:36:34,269 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,34531,1732250164107 already deleted, retry=false 2024-11-22T04:36:34,269 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,34531,1732250164107 expired; onlineServers=0 2024-11-22T04:36:34,269 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,36753,1732250163942' ***** 2024-11-22T04:36:34,269 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:36:34,269 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:36:34,269 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:36:34,269 DEBUG [M:0;8fc3ff0a63e6:36753 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:36:34,269 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T04:36:34,269 DEBUG [M:0;8fc3ff0a63e6:36753 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:36:34,270 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250164458 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250164458,5,FailOnTimeoutGroup] 2024-11-22T04:36:34,270 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250164458 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250164458,5,FailOnTimeoutGroup] 2024-11-22T04:36:34,270 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:36:34,270 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:36:34,270 DEBUG [M:0;8fc3ff0a63e6:36753 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:36:34,270 INFO [M:0;8fc3ff0a63e6:36753 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:36:34,270 INFO [M:0;8fc3ff0a63e6:36753 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:36:34,270 INFO [M:0;8fc3ff0a63e6:36753 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:36:34,271 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:36:34,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:36:34,279 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:34,280 DEBUG [M:0;8fc3ff0a63e6:36753 {}] zookeeper.ZKUtil(347): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T04:36:34,280 WARN [M:0;8fc3ff0a63e6:36753 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T04:36:34,280 INFO [M:0;8fc3ff0a63e6:36753 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/.lastflushedseqids 2024-11-22T04:36:34,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741846_1030 (size=120) 2024-11-22T04:36:34,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741846_1030 (size=120) 2024-11-22T04:36:34,286 INFO [M:0;8fc3ff0a63e6:36753 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:36:34,286 INFO [M:0;8fc3ff0a63e6:36753 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:36:34,287 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:36:34,287 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:34,287 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:34,287 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:36:34,287 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:34,287 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.17 KB heapSize=29.16 KB 2024-11-22T04:36:34,287 ERROR [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData-prefix:8fc3ff0a63e6,36753,1732250163942 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:34,287 WARN [FSHLog-0-hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData-prefix:8fc3ff0a63e6,36753,1732250163942 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-22T04:36:34,287 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 8fc3ff0a63e6%2C36753%2C1732250163942:(num 1732250164248) roll requested 2024-11-22T04:36:34,287 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C36753%2C1732250163942.1732250194287 2024-11-22T04:36:34,293 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,293 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,293 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,293 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,293 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,293 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 with entries=53, filesize=26.62 KB; new WAL /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250194287 2024-11-22T04:36:34,294 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:34,294 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:38721,DS-79a70493-8bea-4268-b101-12b2d4d03047,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-22T04:36:34,294 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 2024-11-22T04:36:34,294 WARN [IPC Server handler 0 on default port 43513 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-22T04:36:34,294 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 after 0ms 2024-11-22T04:36:34,296 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39723:39723),(127.0.0.1/127.0.0.1:38171:38171)] 2024-11-22T04:36:34,296 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 is not closed yet, will try archiving it next time 2024-11-22T04:36:34,311 DEBUG [M:0;8fc3ff0a63e6:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12278fd08ce249aa887c47dbce15321c is 82, key is hbase:meta,,1/info:regioninfo/1732250165119/Put/seqid=0 2024-11-22T04:36:34,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741848_1033 (size=5672) 2024-11-22T04:36:34,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741848_1033 (size=5672) 2024-11-22T04:36:34,321 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12278fd08ce249aa887c47dbce15321c 2024-11-22T04:36:34,341 DEBUG [M:0;8fc3ff0a63e6:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa38d1860034bb1ab54cc5308b54909 is 778, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732250165648/Put/seqid=0 2024-11-22T04:36:34,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741849_1034 (size=6118) 2024-11-22T04:36:34,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741849_1034 (size=6118) 2024-11-22T04:36:34,350 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.57 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa38d1860034bb1ab54cc5308b54909 2024-11-22T04:36:34,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:34,358 INFO [RS:0;8fc3ff0a63e6:34531 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:36:34,358 INFO [RS:0;8fc3ff0a63e6:34531 {}] regionserver.HRegionServer(1031): Exiting; 
stopping=8fc3ff0a63e6,34531,1732250164107; zookeeper connection closed. 2024-11-22T04:36:34,358 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34531-0x10160d3d67d0001, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:34,359 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@75ce4b18 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@75ce4b18 2024-11-22T04:36:34,359 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T04:36:34,377 DEBUG [M:0;8fc3ff0a63e6:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea59d8652979490dbb6eea46b778f2cf is 69, key is 8fc3ff0a63e6,34531,1732250164107/rs:state/1732250164574/Put/seqid=0 2024-11-22T04:36:34,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741850_1035 (size=5156) 2024-11-22T04:36:34,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741850_1035 (size=5156) 2024-11-22T04:36:34,382 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea59d8652979490dbb6eea46b778f2cf 2024-11-22T04:36:34,401 DEBUG [M:0;8fc3ff0a63e6:36753 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9931683ac10343ebb8767a919d0b75ef is 52, key is load_balancer_on/state:d/1732250165256/Put/seqid=0 2024-11-22T04:36:34,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741851_1036 (size=5056) 2024-11-22T04:36:34,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741851_1036 (size=5056) 2024-11-22T04:36:34,407 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9931683ac10343ebb8767a919d0b75ef 2024-11-22T04:36:34,414 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/12278fd08ce249aa887c47dbce15321c as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12278fd08ce249aa887c47dbce15321c 2024-11-22T04:36:34,420 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/12278fd08ce249aa887c47dbce15321c, entries=8, sequenceid=56, filesize=5.5 K 2024-11-22T04:36:34,421 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1aa38d1860034bb1ab54cc5308b54909 as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1aa38d1860034bb1ab54cc5308b54909 2024-11-22T04:36:34,428 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1aa38d1860034bb1ab54cc5308b54909, entries=6, sequenceid=56, filesize=6.0 K 2024-11-22T04:36:34,429 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ea59d8652979490dbb6eea46b778f2cf as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ea59d8652979490dbb6eea46b778f2cf 2024-11-22T04:36:34,434 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ea59d8652979490dbb6eea46b778f2cf, entries=1, sequenceid=56, filesize=5.0 K 2024-11-22T04:36:34,435 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9931683ac10343ebb8767a919d0b75ef as hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9931683ac10343ebb8767a919d0b75ef 2024-11-22T04:36:34,441 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9931683ac10343ebb8767a919d0b75ef, entries=1, sequenceid=56, filesize=4.9 K 2024-11-22T04:36:34,442 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=56, compaction requested=false 2024-11-22T04:36:34,444 INFO [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:36:34,444 DEBUG [M:0;8fc3ff0a63e6:36753 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250194286Disabling compacts and flushes for region at 1732250194286Disabling writes for close at 1732250194287 (+1 ms)Obtaining lock to block concurrent updates at 1732250194287Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250194287Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23726, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732250194287Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250194297 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250194297Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250194310 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250194311 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250194326 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250194341 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250194341Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250194356 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250194376 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250194376Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250194387 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250194401 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250194401Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4ca95683: reopening flushed file at 1732250194413 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@172759d0: reopening flushed file at 1732250194420 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@35b3af2f: reopening flushed file at 1732250194428 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32f7f254: reopening flushed file at 1732250194435 (+7 ms)Finished flush of dataSize ~23.17 KB/23726, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 155ms, sequenceid=56, compaction requested=false at 1732250194442 (+7 ms)Writing region close event to WAL at 1732250194443 (+1 ms)Closed at 1732250194443 2024-11-22T04:36:34,444 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,444 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,444 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,445 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,445 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:36:34,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46443 is added to blk_1073741847_1031 (size=757) 2024-11-22T04:36:34,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40899 is added to blk_1073741847_1031 (size=757) 2024-11-22T04:36:34,889 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:34,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:35,424 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,425 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,445 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,449 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,452 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,456 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,457 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,612 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-22T04:36:35,890 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:35,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:35,959 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:36:35,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,986 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,987 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,991 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,992 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,993 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:35,996 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:36,891 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:36,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:37,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:37,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:38,295 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 after 4001ms 2024-11-22T04:36:38,296 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/WALs/8fc3ff0a63e6,36753,1732250163942/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 to hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/oldWALs/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 2024-11-22T04:36:38,298 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/MasterData/oldWALs/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248 to hdfs://localhost:43513/user/jenkins/test-data/5ce348a4-023c-9a99-9498-e9f0a5aaaf61/oldWALs/8fc3ff0a63e6%2C36753%2C1732250163942.1732250164248$masterlocalwal$ 2024-11-22T04:36:38,298 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:36:38,298 INFO [M:0;8fc3ff0a63e6:36753 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T04:36:38,298 INFO [M:0;8fc3ff0a63e6:36753 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36753 2024-11-22T04:36:38,298 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:36:38,453 INFO [M:0;8fc3ff0a63e6:36753 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:36:38,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:38,453 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36753-0x10160d3d67d0000, quorum=127.0.0.1:54652, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:36:38,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23b7049e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:38,456 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@72d2e6e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:38,456 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:38,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2a260e23{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:38,456 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@278dfe13{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:38,458 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:38,458 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:36:38,458 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid aa48f74c-9c08-4611-b09b-ea41fb154334) service to localhost/127.0.0.1:43513 2024-11-22T04:36:38,458 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:38,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data3/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:38,459 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data4/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:38,460 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:38,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66269315{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:38,463 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bc7279c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:38,463 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:38,463 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10b7c71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:38,464 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f0690d5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:38,465 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:36:38,465 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:36:38,465 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:36:38,465 WARN [BP-1454010557-172.17.0.2-1732250161415 heartbeating to localhost/127.0.0.1:43513 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1454010557-172.17.0.2-1732250161415 (Datanode Uuid 46de8883-cfbb-4e8c-8037-ed77d701058e) service to localhost/127.0.0.1:43513 2024-11-22T04:36:38,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data1/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:38,466 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/cluster_5fe3ec54-df9e-3417-f805-f4100cf0ede9/data/data2/current/BP-1454010557-172.17.0.2-1732250161415 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:36:38,466 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:36:38,471 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1fc8bed8{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:36:38,472 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@42aa99e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:36:38,472 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:36:38,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2835f29c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:36:38,472 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@140caf6f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir/,STOPPED} 2024-11-22T04:36:38,478 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:36:38,498 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:36:38,507 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:43513 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43513 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43513 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43513 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43513 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43513 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter 
Sending Thread for localhost/127.0.0.1:43513 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially 
hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:43513 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=265 (was 237) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=8462 (was 8626) 2024-11-22T04:36:38,515 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=265, ProcessCount=11, AvailableMemoryMB=8462 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.log.dir so I do NOT create it in target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/eabbed41-500f-fa72-9295-c679ee71c0e0/hadoop.tmp.dir so I do NOT create it in target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462, deleteOnExit=true 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/test.cache.data in system properties and HBase conf 2024-11-22T04:36:38,516 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:36:38,517 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:36:38,517 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:36:38,518 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:36:38,531 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:36:38,874 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:38,878 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:38,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:38,884 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:38,884 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:36:38,885 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:38,885 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bd1231f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:38,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@618f0457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:38,892 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:38,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:38,993 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b135886{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/java.io.tmpdir/jetty-localhost-46665-hadoop-hdfs-3_4_1-tests_jar-_-any-6833097247978176736/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:36:38,994 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bfe295d{HTTP/1.1, (http/1.1)}{localhost:46665} 2024-11-22T04:36:38,994 INFO [Time-limited test {}] server.Server(415): Started @196260ms 2024-11-22T04:36:39,008 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:36:39,239 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:39,242 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:39,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:39,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:39,243 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:39,243 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f17b515{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:39,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@750c8565{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:39,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@34534d9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/java.io.tmpdir/jetty-localhost-39401-hadoop-hdfs-3_4_1-tests_jar-_-any-11478587739797577279/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:39,359 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1b7fc8f3{HTTP/1.1, (http/1.1)}{localhost:39401} 2024-11-22T04:36:39,359 INFO [Time-limited test {}] server.Server(415): Started @196626ms 2024-11-22T04:36:39,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:36:39,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:36:39,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:36:39,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:36:39,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:36:39,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:36:39,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@62aa92bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:36:39,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f13d08d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:36:39,498 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@101a8f4e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/java.io.tmpdir/jetty-localhost-38863-hadoop-hdfs-3_4_1-tests_jar-_-any-8801881456202864345/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:36:39,499 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1523019d{HTTP/1.1, (http/1.1)}{localhost:38863} 2024-11-22T04:36:39,499 INFO [Time-limited test {}] server.Server(415): Started @196765ms 2024-11-22T04:36:39,500 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
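Editor's note on the repeated "Failed invocation ... java.lang.reflect.InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed" warnings from util.RecoverLeaseFSUtils above: the utility calls isFileClosed reflectively, and Java reflection wraps whatever the callee throws in an InvocationTargetException whose own message is null, so the real failure only appears in the "Caused by" chain. A minimal, self-contained sketch of that wrapping behavior follows (generic demo code, not HBase's RecoverLeaseFSUtils; the method and path are made up for illustration):

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveCauseDemo {
  // Stand-in for DistributedFileSystem.isFileClosed invoked on an already-closed client.
  public static boolean isFileClosed(String path) throws IOException {
    throw new IOException("Filesystem closed");
  }

  public static void main(String[] args) throws Exception {
    Method m = ReflectiveCauseDemo.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(null, "/some/wal/file");
    } catch (InvocationTargetException e) {
      // The wrapper's own message is null; the interesting exception is the cause,
      // which is exactly what the "Caused by:" lines in the log carry.
      System.out.println("wrapper message: " + e.getMessage());
      System.out.println("real cause: " + e.getCause());
    }
  }
}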
2024-11-22T04:36:39,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:39,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:39,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:36:39,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-22T04:36:39,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-22T04:36:39,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-22T04:36:40,584 WARN [Thread-1654 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data1/current/BP-570972098-172.17.0.2-1732250198543/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:40,585 WARN [Thread-1655 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data2/current/BP-570972098-172.17.0.2-1732250198543/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:40,604 WARN [Thread-1618 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:36:40,606 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4bc1718a95aae44 with lease ID 0xb06bddec5e186937: Processing first storage report for DS-a8147d8f-fbd9-409b-ae7d-5a393632e00e from datanode DatanodeRegistration(127.0.0.1:39075, datanodeUuid=df110a1d-1043-4639-84f1-0da6f890261d, infoPort=46101, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543) 2024-11-22T04:36:40,606 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4bc1718a95aae44 with lease ID 0xb06bddec5e186937: from storage DS-a8147d8f-fbd9-409b-ae7d-5a393632e00e node DatanodeRegistration(127.0.0.1:39075, datanodeUuid=df110a1d-1043-4639-84f1-0da6f890261d, infoPort=46101, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:40,607 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc4bc1718a95aae44 with lease ID 0xb06bddec5e186937: Processing first storage report for DS-f845f7be-eda6-4c2e-ba13-0ce03b128d66 from datanode DatanodeRegistration(127.0.0.1:39075, datanodeUuid=df110a1d-1043-4639-84f1-0da6f890261d, infoPort=46101, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543) 2024-11-22T04:36:40,607 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc4bc1718a95aae44 with lease ID 0xb06bddec5e186937: from storage DS-f845f7be-eda6-4c2e-ba13-0ce03b128d66 node DatanodeRegistration(127.0.0.1:39075, datanodeUuid=df110a1d-1043-4639-84f1-0da6f890261d, infoPort=46101, infoSecurePort=0, ipcPort=40907, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:40,707 WARN [Thread-1665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data3/current/BP-570972098-172.17.0.2-1732250198543/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:40,707 WARN [Thread-1666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data4/current/BP-570972098-172.17.0.2-1732250198543/current, will proceed with Du for space computation calculation, 2024-11-22T04:36:40,726 WARN [Thread-1641 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-22T04:36:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa785fb06a8294445 with lease ID 0xb06bddec5e186938: Processing first storage report for DS-953f5aa3-edae-41f5-9dcd-20253c178b85 from datanode DatanodeRegistration(127.0.0.1:35945, datanodeUuid=34a699fb-b146-4559-b8e9-09143ffca707, infoPort=43667, infoSecurePort=0, ipcPort=41517, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543) 2024-11-22T04:36:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa785fb06a8294445 with lease ID 0xb06bddec5e186938: from storage DS-953f5aa3-edae-41f5-9dcd-20253c178b85 node DatanodeRegistration(127.0.0.1:35945, datanodeUuid=34a699fb-b146-4559-b8e9-09143ffca707, infoPort=43667, infoSecurePort=0, ipcPort=41517, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:36:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa785fb06a8294445 with lease ID 0xb06bddec5e186938: Processing first storage report for DS-76a7f655-05b2-4147-ab33-fce96f1381bc from datanode DatanodeRegistration(127.0.0.1:35945, datanodeUuid=34a699fb-b146-4559-b8e9-09143ffca707, infoPort=43667, infoSecurePort=0, ipcPort=41517, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543) 2024-11-22T04:36:40,728 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa785fb06a8294445 with lease ID 0xb06bddec5e186938: from storage DS-76a7f655-05b2-4147-ab33-fce96f1381bc node DatanodeRegistration(127.0.0.1:35945, datanodeUuid=34a699fb-b146-4559-b8e9-09143ffca707, infoPort=43667, infoSecurePort=0, ipcPort=41517, storageInfo=lv=-57;cid=testClusterID;nsid=1971300676;c=1732250198543), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:36:40,830 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3 2024-11-22T04:36:40,833 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/zookeeper_0, clientPort=51821, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:36:40,834 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51821 2024-11-22T04:36:40,834 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
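Editor's note on the MiniZooKeeperCluster entry above, which reports that it "ran 'stat' on client port=51821": the same liveness probe can be issued by hand with ZooKeeper's four-letter 'stat' command over a plain socket. The sketch below is a hedged illustration only; 51821 is just the port from this particular run, and the server must have the 'stat' four-letter word enabled for it to answer:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.OutputStream;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

public class ZkStatProbe {
  public static void main(String[] args) throws Exception {
    try (Socket s = new Socket("127.0.0.1", 51821)) {
      OutputStream out = s.getOutputStream();
      out.write("stat".getBytes(StandardCharsets.UTF_8));   // four-letter admin command
      out.flush();
      BufferedReader in =
          new BufferedReader(new InputStreamReader(s.getInputStream(), StandardCharsets.UTF_8));
      for (String line; (line = in.readLine()) != null; ) {
        System.out.println(line);   // e.g. "Zookeeper version: ...", "Mode: standalone"
      }
    }
  }
}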
2024-11-22T04:36:40,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:40,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:36:40,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:36:40,848 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af with version=8 2024-11-22T04:36:40,848 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:36:40,850 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:36:40,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:40,850 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:36:40,851 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41701 2024-11-22T04:36:40,853 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41701 connecting to ZooKeeper ensemble=127.0.0.1:51821 2024-11-22T04:36:40,893 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:40,907 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:417010x0, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:36:40,907 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41701-0x10160d466ac0000 connected 2024-11-22T04:36:40,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:40,984 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:40,985 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:40,987 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:40,988 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af, hbase.cluster.distributed=false 2024-11-22T04:36:40,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:36:40,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-22T04:36:40,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41701 2024-11-22T04:36:40,995 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41701 2024-11-22T04:36:40,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-22T04:36:40,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41701 2024-11-22T04:36:41,013 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:36:41,013 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:36:41,014 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44715 2024-11-22T04:36:41,015 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44715 connecting to ZooKeeper ensemble=127.0.0.1:51821 2024-11-22T04:36:41,016 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:41,018 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:41,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:447150x0, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:36:41,026 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:447150x0, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:36:41,027 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:36:41,027 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44715-0x10160d466ac0001 connected 2024-11-22T04:36:41,027 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:36:41,028 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:36:41,029 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:36:41,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44715 2024-11-22T04:36:41,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44715 2024-11-22T04:36:41,029 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44715 2024-11-22T04:36:41,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44715 2024-11-22T04:36:41,030 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started 
handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44715 2024-11-22T04:36:41,043 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:41701 2024-11-22T04:36:41,043 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:41,047 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:41,048 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:36:41,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,058 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,058 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:36:41,059 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,41701,1732250200850 from backup master directory 2024-11-22T04:36:41,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:41,068 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
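Editor's note on the ZKUtil/ZKWatcher entries above ("Set watcher on znode that does not yet exist, /hbase/running" followed later by NodeCreated events): the underlying ZooKeeper pattern is that exists() with a watcher registers a watch even when the znode is absent, so a subsequent create of that path fires NodeCreated. A minimal sketch of that pattern follows (raw ZooKeeper client API, not HBase's ZKUtil; the connect string and path are simply the values from this run):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchAbsentZnode {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51821", 30000, event -> { });
    // Returns null because the node does not exist yet, but the watch is still set.
    zk.exists("/hbase/running", (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("NodeCreated fired for " + event.getPath());
        created.countDown();
      }
    });
    created.await();   // unblocks once another client creates /hbase/running
    zk.close();
  }
}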
2024-11-22T04:36:41,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:36:41,068 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,073 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/hbase.id] with ID: efefdc7b-a02b-4aa8-987b-0083656f02db 2024-11-22T04:36:41,073 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/.tmp/hbase.id 2024-11-22T04:36:41,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:36:41,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:36:41,080 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/.tmp/hbase.id]:[hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/hbase.id] 2024-11-22T04:36:41,094 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:41,094 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:36:41,096 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
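Editor's note on the FSUtils entries above, which write the cluster ID file to a temporary location and then move it to its target path: this is the usual write-then-rename pattern, so readers never observe a partially written hbase.id. A hedged sketch of the pattern using the plain Hadoop FileSystem API follows (illustrative paths, not HBase's actual FSUtils code; the NameNode address and cluster ID value are taken from the log lines above):

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteClusterIdFile {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:35351");   // NameNode from this test run
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/user/jenkins/test-data/.tmp/hbase.id");   // illustrative paths
    Path dst = new Path("/user/jenkins/test-data/hbase.id");

    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("efefdc7b-a02b-4aa8-987b-0083656f02db".getBytes(StandardCharsets.UTF_8));
    }
    // Publish atomically: the final path only ever holds a complete file.
    if (!fs.rename(tmp, dst)) {
      throw new IllegalStateException("rename of " + tmp + " to " + dst + " failed");
    }
    System.out.println("cluster id published at " + dst);
  }
}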
2024-11-22T04:36:41,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:36:41,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:36:41,116 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:36:41,117 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:36:41,117 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:36:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:36:41,125 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store 2024-11-22T04:36:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:36:41,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:36:41,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:41,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:36:41,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:41,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:41,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:36:41,132 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:36:41,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
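Editor's note on the master:store table descriptor logged above: its 'info' family is listed with VERSIONS => '3', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL' and an 8 KB block size. The sketch below expresses roughly those settings with the public HBase client descriptor builders; it is an approximation for illustration only, not the internal code path that actually created this table:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static void main(String[] args) {
    // Roughly the 'info' family settings shown in the log above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);   // prints a descriptor similar to the one logged above
  }
}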
2024-11-22T04:36:41,133 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250201132Disabling compacts and flushes for region at 1732250201132Disabling writes for close at 1732250201132Writing region close event to WAL at 1732250201132Closed at 1732250201132 2024-11-22T04:36:41,133 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/.initializing 2024-11-22T04:36:41,133 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/WALs/8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,136 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C41701%2C1732250200850, suffix=, logDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/WALs/8fc3ff0a63e6,41701,1732250200850, archiveDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/oldWALs, maxLogs=10 2024-11-22T04:36:41,136 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C41701%2C1732250200850.1732250201136 2024-11-22T04:36:41,140 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/WALs/8fc3ff0a63e6,41701,1732250200850/8fc3ff0a63e6%2C41701%2C1732250200850.1732250201136 2024-11-22T04:36:41,142 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46101:46101),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-22T04:36:41,142 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:41,142 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:41,143 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,143 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,144 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,145 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:36:41,146 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,146 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,147 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:36:41,147 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:41,148 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:36:41,149 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:41,149 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:36:41,151 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,151 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:41,151 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,152 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,152 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,154 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,154 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,155 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:36:41,156 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:36:41,158 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:41,159 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=716745, jitterRate=-0.08861234784126282}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:36:41,159 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250201143Initializing all the Stores at 1732250201144 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201144Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250201144Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250201144Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250201144Cleaning up temporary data from old regions at 1732250201154 (+10 ms)Region opened successfully at 1732250201159 (+5 ms) 2024-11-22T04:36:41,159 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:36:41,162 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66fe6534, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:36:41,163 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:36:41,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:36:41,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:36:41,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:36:41,164 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:36:41,165 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:36:41,165 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:36:41,167 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:36:41,168 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:36:41,174 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:36:41,174 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:36:41,175 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:36:41,184 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:36:41,184 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:36:41,185 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:36:41,194 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:36:41,195 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:36:41,205 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:36:41,207 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:36:41,215 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:36:41,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:41,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:36:41,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,226 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,227 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,41701,1732250200850, sessionid=0x10160d466ac0000, setting cluster-up flag (Was=false) 2024-11-22T04:36:41,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,279 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:36:41,280 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,300 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,331 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:36:41,332 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:41,333 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:36:41,335 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:41,335 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:36:41,336 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:36:41,336 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,41701,1732250200850 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:36:41,337 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:41,337 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:36:41,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250231339 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:36:41,339 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,340 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:36:41,340 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:41,340 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:36:41,340 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:36:41,340 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:36:41,340 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:36:41,340 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:36:41,340 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250201340,5,FailOnTimeoutGroup] 2024-11-22T04:36:41,341 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250201341,5,FailOnTimeoutGroup] 2024-11-22T04:36:41,341 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,341 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:36:41,341 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,341 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,341 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,341 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:36:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:36:41,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:36:41,350 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:36:41,351 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af 2024-11-22T04:36:41,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:36:41,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:36:41,357 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:41,359 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:36:41,360 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:36:41,360 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:36:41,361 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:36:41,361 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:36:41,363 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:36:41,363 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,364 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:36:41,365 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:36:41,365 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:36:41,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740 2024-11-22T04:36:41,366 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740 2024-11-22T04:36:41,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:36:41,368 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:36:41,368 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
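The store openings above record the column family attributes of hbase:meta (for the 'info' family: BLOOMFILTER ROWCOL, IN_MEMORY true, DATA_BLOCK_ENCODING ROW_INDEX_V1, BLOCKSIZE 8 KB, VERSIONS 3). For reference, the same attributes are expressed with the public HBase client builders roughly as follows. This is only a sketch: the table name "demo" is hypothetical, and hbase:meta itself is created by the master via the FSTableDescriptors / InitMetaProcedure steps logged here, not by client code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    // Column family mirroring the attributes logged for hbase:meta's 'info' family:
    // ROWCOL bloom filter, in-memory, ROW_INDEX_V1 encoding, 8 KB blocks, 3 versions.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .setMaxVersions(3)
        .build();

    // "demo" is a hypothetical table name used purely for illustration.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(table);
  }
}

A descriptor built this way is what Admin.createTable(TableDescriptor) would accept if a table were actually being created.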
2024-11-22T04:36:41,369 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:36:41,371 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:41,372 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=835308, jitterRate=0.062150269746780396}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:36:41,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250201358Initializing all the Stores at 1732250201358Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201358Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201358Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250201358Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201358Cleaning up temporary data from old regions at 1732250201368 (+10 ms)Region opened successfully at 1732250201372 (+4 ms) 2024-11-22T04:36:41,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:36:41,372 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:36:41,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:36:41,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:36:41,372 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:36:41,373 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:36:41,373 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250201372Disabling compacts and flushes for region at 1732250201372Disabling writes for close at 1732250201372Writing region close event 
to WAL at 1732250201373 (+1 ms)Closed at 1732250201373 2024-11-22T04:36:41,374 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:41,374 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:36:41,374 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:36:41,376 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:36:41,377 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:36:41,432 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(746): ClusterId : efefdc7b-a02b-4aa8-987b-0083656f02db 2024-11-22T04:36:41,432 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:36:41,437 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:36:41,437 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:36:41,448 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:36:41,448 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@514eb9aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:36:41,464 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:44715 2024-11-22T04:36:41,464 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:36:41,464 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:36:41,464 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(832): About to register with Master. 
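The CompactionConfiguration(183) entries repeated above for each column family (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms, jitter 0.5) appear to be the stock defaults. A minimal sketch of the configuration keys those values correspond to, assuming a standard hbase-site.xml-backed Configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // Values match the CompactionConfiguration line logged above (effectively the
    // shipped defaults); set here only to show which key maps to which logged field.
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
    conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);            // major period: 7 days
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter

    System.out.println("compaction ratio = "
        + conf.getFloat("hbase.hstore.compaction.ratio", 1.2f));
  }
}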
2024-11-22T04:36:41,464 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,41701,1732250200850 with port=44715, startcode=1732250201012 2024-11-22T04:36:41,465 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:36:41,466 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40463, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:36:41,467 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41701 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,467 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41701 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,468 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af 2024-11-22T04:36:41,469 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35351 2024-11-22T04:36:41,469 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:36:41,479 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:36:41,479 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] zookeeper.ZKUtil(111): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,479 WARN [RS:0;8fc3ff0a63e6:44715 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:36:41,479 INFO [RS:0;8fc3ff0a63e6:44715 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:41,479 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,480 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,44715,1732250201012] 2024-11-22T04:36:41,483 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:36:41,485 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:36:41,486 INFO [RS:0;8fc3ff0a63e6:44715 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:36:41,486 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
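The RegionServerTracker and ZKUtil entries above show the region server registering an ephemeral znode under /hbase/rs (here 8fc3ff0a63e6,44715,1732250201012) on the quorum at 127.0.0.1:51821. A minimal sketch of inspecting that registration with the plain ZooKeeper client; the session timeout and the no-op watcher are arbitrary choices for illustration:

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    // Quorum and base znode taken from the log above; adjust for a real deployment.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:51821", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // No-op watcher; the watchers seen in the log belong to HBase itself.
      }
    });
    try {
      // Each live region server registers an ephemeral child under /hbase/rs,
      // e.g. 8fc3ff0a63e6,44715,1732250201012 in this run.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println);
    } finally {
      zk.close();
    }
  }
}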
2024-11-22T04:36:41,486 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:36:41,487 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:36:41,487 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,487 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,487 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:36:41,488 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
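The ScheduledChore entries above (CompactionChecker every 1000 ms, ExecutorStatusChore every minute, and so on) all follow the same pattern: a named chore scheduled on a ChoreService. ScheduledChore and ChoreService are HBase-internal classes rather than public client API, so the following is only a sketch of that pattern, not something a typical application would do:

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable; in the entries above the region server passes itself.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Same shape as "Chore ScheduledChore name=CompactionChecker, period=1000":
    // a named chore whose chore() body runs once per period (milliseconds).
    ScheduledChore demoChore = new ScheduledChore("DemoChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };

    ChoreService service = new ChoreService("demo");
    service.scheduleChore(demoChore);
    Thread.sleep(3000);
    service.shutdown();
  }
}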
2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,494 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,44715,1732250201012-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:36:41,511 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:36:41,512 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,44715,1732250201012-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,512 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:41,512 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.Replication(171): 8fc3ff0a63e6,44715,1732250201012 started 2024-11-22T04:36:41,527 WARN [8fc3ff0a63e6:41701 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:36:41,527 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
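The MemStoreFlusher figures logged earlier (globalMemStoreLimit=880 M with a low mark of 836 M, i.e. 95% of the limit) and the HeapMemoryManager start above are governed by heap-fraction settings. A sketch of the relevant keys, assuming the usual defaults; the absolute sizes in this run simply reflect the test JVM's heap:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class MemorySizingSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();

    // 836 M is 95% of 880 M, matching the 0.95 lower limit below.
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);              // heap share for all memstores
    conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f); // flush low-water mark
    conf.setFloat("hfile.block.cache.size", 0.4f);                               // heap share for the block cache

    System.out.println("memstore share = "
        + conf.getFloat("hbase.regionserver.global.memstore.size", 0.4f));
  }
}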
2024-11-22T04:36:41,527 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,44715,1732250201012, RpcServer on 8fc3ff0a63e6/172.17.0.2:44715, sessionid=0x10160d466ac0001 2024-11-22T04:36:41,528 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:36:41,528 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,528 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,44715,1732250201012' 2024-11-22T04:36:41,528 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:36:41,528 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,44715,1732250201012' 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:36:41,529 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:36:41,530 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:36:41,530 INFO [RS:0;8fc3ff0a63e6:44715 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:36:41,530 INFO [RS:0;8fc3ff0a63e6:44715 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
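The procedure members started above watch the /hbase/flush-table-proc and /hbase/online-snapshot znodes; client-initiated table flushes and online snapshots are coordinated through them. A minimal client-side sketch, where the table name "demo" and snapshot name "demo-snap" are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Quorum and client port taken from the log above.
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");
    conf.setInt("hbase.zookeeper.property.clientPort", 51821);

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Flush requests are coordinated with region servers through the
      // /hbase/flush-table-proc znodes the procedure member is now watching.
      admin.flush(TableName.valueOf("demo"));

      // Likewise, an online snapshot runs through /hbase/online-snapshot.
      admin.snapshot("demo-snap", TableName.valueOf("demo"));
    }
  }
}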
2024-11-22T04:36:41,632 INFO [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C44715%2C1732250201012, suffix=, logDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012, archiveDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs, maxLogs=32 2024-11-22T04:36:41,632 INFO [RS:0;8fc3ff0a63e6:44715 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632 2024-11-22T04:36:41,637 INFO [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632 2024-11-22T04:36:41,638 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:46101:46101)] 2024-11-22T04:36:41,777 DEBUG [8fc3ff0a63e6:41701 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:36:41,778 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,780 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,44715,1732250201012, state=OPENING 2024-11-22T04:36:41,784 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:36:41,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:36:41,795 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:36:41,795 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:41,795 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:41,796 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,44715,1732250201012}] 2024-11-22T04:36:41,894 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:41,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:41,950 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:36:41,953 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54895, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:36:41,959 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:36:41,959 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:36:41,962 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C44715%2C1732250201012.meta, suffix=.meta, logDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012, archiveDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs, maxLogs=32 2024-11-22T04:36:41,963 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C44715%2C1732250201012.meta.1732250201963.meta 2024-11-22T04:36:41,969 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.meta.1732250201963.meta 2024-11-22T04:36:41,976 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46101:46101),(127.0.0.1/127.0.0.1:43667:43667)] 2024-11-22T04:36:41,977 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:41,977 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:36:41,977 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:36:41,977 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor 
org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T04:36:41,978 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:36:41,978 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:41,978 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:36:41,978 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:36:41,979 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:36:41,980 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:36:41,980 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,981 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:36:41,982 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-11-22T04:36:41,982 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,982 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:36:41,983 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:36:41,983 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,984 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:36:41,984 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:36:41,985 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:41,985 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:36:41,985 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:36:41,986 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740 2024-11-22T04:36:41,987 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740 2024-11-22T04:36:41,989 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:36:41,989 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:36:41,989 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T04:36:41,991 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:36:41,992 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=710145, jitterRate=-0.0970049500465393}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:36:41,992 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:36:41,993 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250201978Writing region info on filesystem at 1732250201978Initializing all the Stores at 1732250201979 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201979Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201979Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', 
MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250201979Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250201979Cleaning up temporary data from old regions at 1732250201989 (+10 ms)Running coprocessor post-open hooks at 1732250201992 (+3 ms)Region opened successfully at 1732250201992 2024-11-22T04:36:41,994 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250201950 2024-11-22T04:36:41,997 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:36:41,997 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:36:41,998 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:41,999 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,44715,1732250201012, state=OPEN 2024-11-22T04:36:42,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:36:42,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:36:42,037 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:42,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:42,037 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:36:42,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:36:42,041 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,44715,1732250201012 in 242 msec 2024-11-22T04:36:42,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:36:42,043 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 
in 667 msec 2024-11-22T04:36:42,045 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:36:42,045 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:36:42,046 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:36:42,046 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,44715,1732250201012, seqNum=-1] 2024-11-22T04:36:42,047 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:36:42,048 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40593, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:36:42,054 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 719 msec 2024-11-22T04:36:42,055 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250202055, completionTime=-1 2024-11-22T04:36:42,055 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:36:42,055 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:36:42,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:36:42,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250262057 2024-11-22T04:36:42,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250322057 2024-11-22T04:36:42,057 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:41701, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:42,058 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:36:42,060 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:36:42,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.994sec 2024-11-22T04:36:42,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:36:42,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:36:42,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:36:42,062 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-22T04:36:42,063 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:36:42,063 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:36:42,063 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:36:42,065 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:36:42,065 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:36:42,065 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41701,1732250200850-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-22T04:36:42,132 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@470f1595, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:42,132 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,41701,-1 for getting cluster id 2024-11-22T04:36:42,133 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:36:42,134 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'efefdc7b-a02b-4aa8-987b-0083656f02db' 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "efefdc7b-a02b-4aa8-987b-0083656f02db" 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66106cb4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,41701,-1] 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:36:42,135 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:36:42,137 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46834, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:36:42,138 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@394b54be, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:36:42,138 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:36:42,139 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,44715,1732250201012, seqNum=-1] 2024-11-22T04:36:42,139 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:36:42,141 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58118, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:36:42,142 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:42,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:36:42,145 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:36:42,146 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T04:36:42,147 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:36:42,147 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4442d12d 2024-11-22T04:36:42,147 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T04:36:42,148 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46838, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T04:36:42,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T04:36:42,149 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-22T04:36:42,149 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:36:42,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T04:36:42,152 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T04:36:42,152 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:42,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-22T04:36:42,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:36:42,153 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T04:36:42,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741835_1011 (size=405) 2024-11-22T04:36:42,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741835_1011 (size=405) 2024-11-22T04:36:42,162 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => cd97978f44c6a4ee80f494afa91c1edc, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af 2024-11-22T04:36:42,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741836_1012 (size=88) 2024-11-22T04:36:42,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741836_1012 (size=88) 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing cd97978f44c6a4ee80f494afa91c1edc, disabling compactions & flushes 2024-11-22T04:36:42,169 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. after waiting 0 ms 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 
2024-11-22T04:36:42,169 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,169 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for cd97978f44c6a4ee80f494afa91c1edc: Waiting for close lock at 1732250202169Disabling compacts and flushes for region at 1732250202169Disabling writes for close at 1732250202169Writing region close event to WAL at 1732250202169Closed at 1732250202169 2024-11-22T04:36:42,170 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T04:36:42,171 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732250202171"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250202171"}]},"ts":"1732250202171"} 2024-11-22T04:36:42,173 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-22T04:36:42,174 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T04:36:42,175 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250202174"}]},"ts":"1732250202174"} 2024-11-22T04:36:42,177 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-22T04:36:42,177 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd97978f44c6a4ee80f494afa91c1edc, ASSIGN}] 2024-11-22T04:36:42,178 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd97978f44c6a4ee80f494afa91c1edc, ASSIGN 2024-11-22T04:36:42,179 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd97978f44c6a4ee80f494afa91c1edc, ASSIGN; state=OFFLINE, location=8fc3ff0a63e6,44715,1732250201012; forceNewPlan=false, retain=false 2024-11-22T04:36:42,330 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd97978f44c6a4ee80f494afa91c1edc, regionState=OPENING, regionLocation=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:42,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd97978f44c6a4ee80f494afa91c1edc, ASSIGN because future has completed 2024-11-22T04:36:42,335 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd97978f44c6a4ee80f494afa91c1edc, server=8fc3ff0a63e6,44715,1732250201012}] 2024-11-22T04:36:42,494 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,494 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => cd97978f44c6a4ee80f494afa91c1edc, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:36:42,494 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,494 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:36:42,495 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,495 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,496 INFO [StoreOpener-cd97978f44c6a4ee80f494afa91c1edc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,498 INFO [StoreOpener-cd97978f44c6a4ee80f494afa91c1edc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region cd97978f44c6a4ee80f494afa91c1edc columnFamilyName info 2024-11-22T04:36:42,498 DEBUG [StoreOpener-cd97978f44c6a4ee80f494afa91c1edc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:36:42,498 INFO [StoreOpener-cd97978f44c6a4ee80f494afa91c1edc-1 {}] regionserver.HStore(327): Store=cd97978f44c6a4ee80f494afa91c1edc/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:36:42,499 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,499 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,500 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,500 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,500 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,502 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,504 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:36:42,505 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened cd97978f44c6a4ee80f494afa91c1edc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809861, jitterRate=0.029792457818984985}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:36:42,505 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:36:42,505 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for cd97978f44c6a4ee80f494afa91c1edc: Running coprocessor pre-open hook at 1732250202495Writing region info on filesystem at 1732250202495Initializing all the Stores at 1732250202496 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250202496Cleaning up temporary data from old regions at 1732250202500 (+4 ms)Running coprocessor post-open hooks at 1732250202505 (+5 ms)Region opened successfully at 1732250202505 2024-11-22T04:36:42,506 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc., pid=6, masterSystemTime=1732250202489 2024-11-22T04:36:42,509 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,509 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:36:42,510 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=cd97978f44c6a4ee80f494afa91c1edc, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:36:42,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure cd97978f44c6a4ee80f494afa91c1edc, server=8fc3ff0a63e6,44715,1732250201012 because future has completed 2024-11-22T04:36:42,515 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T04:36:42,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure cd97978f44c6a4ee80f494afa91c1edc, server=8fc3ff0a63e6,44715,1732250201012 in 178 msec 2024-11-22T04:36:42,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T04:36:42,518 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=cd97978f44c6a4ee80f494afa91c1edc, ASSIGN in 339 msec 2024-11-22T04:36:42,519 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T04:36:42,520 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250202519"}]},"ts":"1732250202519"} 2024-11-22T04:36:42,522 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-22T04:36:42,523 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T04:36:42,526 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 374 msec 2024-11-22T04:36:42,895 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:42,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:43,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:43,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:44,896 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:44,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:45,500 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:36:45,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,501 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,526 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,527 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:36:45,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
2024-11-22T04:36:45,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T04:36:45,531 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T04:36:45,534 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-22T04:36:45,897 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T04:36:45,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:46,898 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:46,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:47,484 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T04:36:47,484 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-22T04:36:47,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:47,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:48,899 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:48,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:49,900 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:49,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T04:36:49,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-22T04:36:49,997 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-22T04:36:49,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T04:36:49,998 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-22T04:36:49,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-22T04:36:49,998 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-22T04:36:49,999 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:36:49,999 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-22T04:36:50,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:50,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:51,901 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:51,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T04:36:52,247 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-22T04:36:52,248 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T04:36:52,248 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, for max=2147483647 with caching=100
2024-11-22T04:36:52,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:36:52,252 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:36:52,256 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc., hostname=8fc3ff0a63e6,44715,1732250201012, seqNum=2]
2024-11-22T04:36:52,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:36:52,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:36:52,275 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T04:36:52,276 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-22T04:36:52,277 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T04:36:52,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
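The flush request logged above ("Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling") is executed by the master as a FlushTableProcedure (pid=7) with one FlushRegionProcedure per region (pid=8). A hedged Java sketch of how a client typically issues such a flush through the HBase Admin API; connection settings are placeholders and this is not the test's actual code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Flush every region of the table; the master runs this as a FlushTableProcedure
            // with FlushRegionProcedure subtasks, matching the records in this log.
            admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
    }
}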
2024-11-22T04:36:52,444 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44715 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-22T04:36:52,444 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:36:52,445 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing cd97978f44c6a4ee80f494afa91c1edc 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T04:36:52,464 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/362de752a29e4db4a2aa8a55f1855867 is 1080, key is row0001/info:/1732250212258/Put/seqid=0
2024-11-22T04:36:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741837_1013 (size=6033)
2024-11-22T04:36:52,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741837_1013 (size=6033)
2024-11-22T04:36:52,475 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/362de752a29e4db4a2aa8a55f1855867
2024-11-22T04:36:52,484 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/362de752a29e4db4a2aa8a55f1855867 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867
2024-11-22T04:36:52,492 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867, entries=1, sequenceid=5, filesize=5.9 K
2024-11-22T04:36:52,493 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 49ms, sequenceid=5, compaction requested=false
2024-11-22T04:36:52,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for cd97978f44c6a4ee80f494afa91c1edc:
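After the flush above, the single cell written by the test (key row0001/info:/.../Put) lives in the new store file 362de752a29e4db4a2aa8a55f1855867 instead of the memstore, and reads are unaffected. A small illustrative read check; the empty column qualifier and connection details are assumptions, not taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ReadAfterFlushSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(
                 TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))) {
            // Fetch the 'info' family of row0001, which the flush just persisted to an HFile.
            Get get = new Get(Bytes.toBytes("row0001"));
            get.addFamily(Bytes.toBytes("info"));
            Result result = table.get(get);
            System.out.println("row0001 present after flush: " + !result.isEmpty());
        }
    }
}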
2024-11-22T04:36:52,493 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:36:52,495 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-22T04:36:52,498 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-22T04:36:52,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-22T04:36:52,505 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 222 msec
2024-11-22T04:36:52,509 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 237 msec
2024-11-22T04:36:52,902 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ...
11 more 2024-11-22T04:36:52,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:53,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:53,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:54,903 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:54,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:55,904 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:55,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:56,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:56,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:57,905 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:57,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:58,906 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:36:58,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:59,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:36:59,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:00,907 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:00,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:01,908 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:01,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 after 68055ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-22T04:37:01,920 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:01,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta after 68043ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-22T04:37:02,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-22T04:37:02,357 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T04:37:02,360 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T04:37:02,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T04:37:02,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-22T04:37:02,364 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-22T04:37:02,365 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-22T04:37:02,365 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-22T04:37:02,519 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44715 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-22T04:37:02,520 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 
2024-11-22T04:37:02,520 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing cd97978f44c6a4ee80f494afa91c1edc 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T04:37:02,528 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 is 1080, key is row0002/info:/1732250222358/Put/seqid=0 2024-11-22T04:37:02,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741838_1014 (size=6033) 2024-11-22T04:37:02,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741838_1014 (size=6033) 2024-11-22T04:37:02,540 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 2024-11-22T04:37:02,548 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 2024-11-22T04:37:02,553 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5, entries=1, sequenceid=9, filesize=5.9 K 2024-11-22T04:37:02,554 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 34ms, sequenceid=9, compaction requested=false 2024-11-22T04:37:02,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for cd97978f44c6a4ee80f494afa91c1edc: 2024-11-22T04:37:02,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 
2024-11-22T04:37:02,555 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-22T04:37:02,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-22T04:37:02,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T04:37:02,559 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 191 msec 2024-11-22T04:37:02,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 199 msec 2024-11-22T04:37:02,909 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:02,921 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:03,910 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:03,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:04,911 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:04,922 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:05,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:05,923 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:06,912 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:06,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:07,913 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:07,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:08,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:08,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:09,914 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:09,925 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T04:37:10,830 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details.
2024-11-22T04:37:10,915 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-22T04:37:10,926 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:11,916 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:11,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-22T04:37:12,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-22T04:37:12,407 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T04:37:12,411 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411
2024-11-22T04:37:12,418 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:12,418 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:12,418 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:12,418 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:12,418 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:12,418 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411
2024-11-22T04:37:12,419 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:46101:46101)]
2024-11-22T04:37:12,419 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632 is not closed yet, will try archiving it next time
2024-11-22T04:37:12,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741833_1009 (size=5546)
2024-11-22T04:37:12,421 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:37:12,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741833_1009 (size=5546)
2024-11-22T04:37:12,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:37:12,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T04:37:12,423 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T04:37:12,424 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T04:37:12,424 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T04:37:12,578 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=44715 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12
2024-11-22T04:37:12,578 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:37:12,579 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing cd97978f44c6a4ee80f494afa91c1edc 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T04:37:12,585 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/f860709150ff4091bcac3c737352b508 is 1080, key is row0003/info:/1732250232409/Put/seqid=0
2024-11-22T04:37:12,591 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741840_1016 (size=6033)
2024-11-22T04:37:12,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741840_1016 (size=6033)
2024-11-22T04:37:12,592 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/f860709150ff4091bcac3c737352b508
2024-11-22T04:37:12,600 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/f860709150ff4091bcac3c737352b508 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508
2024-11-22T04:37:12,606 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508, entries=1, sequenceid=13, filesize=5.9 K
2024-11-22T04:37:12,607 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 29ms, sequenceid=13, compaction requested=true
2024-11-22T04:37:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for cd97978f44c6a4ee80f494afa91c1edc:
2024-11-22T04:37:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:37:12,607 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-22T04:37:12,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-22T04:37:12,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-22T04:37:12,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec
2024-11-22T04:37:12,614 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-22T04:37:12,823 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632 to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs/8fc3ff0a63e6%2C44715%2C1732250201012.1732250201632
2024-11-22T04:37:12,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:12,927 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:13,917 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:13,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:14,918 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:14,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
2024-11-22T04:37:15,919 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[The same "Failed invocation" WARN from Close-WAL-Writer-0, with a stack trace identical to the one above, is logged roughly once per second for each of the two WAL files (8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 and 8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta): at 04:37:15,929, 04:37:16,920, 04:37:16,930, 04:37:17,920, 04:37:17,931, 04:37:18,921, 04:37:18,931, 04:37:19,922, 04:37:19,932, 04:37:20,922, 04:37:20,933, 04:37:21,923 and 04:37:21,933.]
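The repeated InvocationTargetException above is the lease-recovery path probing DistributedFileSystem.isFileClosed reflectively (see the RecoverLeaseFSUtils.isFileClosed frame); once the test's DFSClient has been shut down, every probe fails with "Filesystem closed", and the reflective call wraps that IOException. A minimal sketch of that reflective-probe pattern is below; it is not the actual RecoverLeaseFSUtils code, and the class and method names other than the Hadoop API are made up for illustration.

```java
// Illustrative sketch of a reflective isFileClosed probe; HBase's
// RecoverLeaseFSUtils does something along these lines per the frames above.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

final class IsFileClosedProbe {

  /** Returns true if the file is known to be closed, false if we cannot tell. */
  static boolean isFileClosed(FileSystem fs, Path path) {
    try {
      // Look the method up on the concrete FileSystem implementation;
      // not every FileSystem exposes isFileClosed(Path).
      Method m = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) m.invoke(fs, path);
    } catch (NoSuchMethodException e) {
      return false; // this FileSystem has no isFileClosed at all
    } catch (InvocationTargetException e) {
      // The reflective target threw; with a closed DFSClient the cause is
      // java.io.IOException: Filesystem closed, exactly as in the log above.
      return false;
    } catch (ReflectiveOperationException e) {
      return false; // e.g. IllegalAccessException
    }
  }
}
```

The one-second spacing of the warnings matches a retry loop that keeps re-probing until the file is reported closed or recovery gives up.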
2024-11-22T04:37:22,516 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11
2024-11-22T04:37:22,517 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-22T04:37:22,517 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-22T04:37:22,518 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-22T04:37:22,518 DEBUG [Time-limited test {}] regionserver.HStore(1541): cd97978f44c6a4ee80f494afa91c1edc/info is initiating minor compaction (all files)
2024-11-22T04:37:22,518 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms
2024-11-22T04:37:22,518 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled.
2024-11-22T04:37:22,518 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of cd97978f44c6a4ee80f494afa91c1edc/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
2024-11-22T04:37:22,518 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508] into tmpdir=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp, totalSize=17.7 K
2024-11-22T04:37:22,519 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 362de752a29e4db4a2aa8a55f1855867, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732250212258
2024-11-22T04:37:22,519 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 2a2fac2ffdd2441db109fa6aaa2a6ce5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732250222358
2024-11-22T04:37:22,519 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting f860709150ff4091bcac3c737352b508, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732250232409
2024-11-22T04:37:22,588 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): cd97978f44c6a4ee80f494afa91c1edc#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-22T04:37:22,588 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/fff1141337ca4bae9b435683dc3abeed is 1080, key is row0001/info:/1732250212258/Put/seqid=0
2024-11-22T04:37:22,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741841_1017 (size=8296)
2024-11-22T04:37:22,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741841_1017 (size=8296)
2024-11-22T04:37:22,601 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/fff1141337ca4bae9b435683dc3abeed as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/fff1141337ca4bae9b435683dc3abeed
2024-11-22T04:37:22,608 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in cd97978f44c6a4ee80f494afa91c1edc/info of cd97978f44c6a4ee80f494afa91c1edc into fff1141337ca4bae9b435683dc3abeed(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-22T04:37:22,609 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for cd97978f44c6a4ee80f494afa91c1edc:
2024-11-22T04:37:22,612 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C44715%2C1732250201012.1732250242611
2024-11-22T04:37:22,636 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:22,636 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:22,636 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:22,636 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:22,636 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:37:22,637 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250242611
2024-11-22T04:37:22,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:46101:46101)]
2024-11-22T04:37:22,637 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411 is not closed yet, will try archiving it next time
2024-11-22T04:37:22,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741839_1015 (size=2520)
2024-11-22T04:37:22,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741839_1015 (size=2520)
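For context, the flush, compaction and WAL roll recorded above can all be requested from a client through the public Admin API. The sketch below is illustrative only and assumes a reachable cluster; the table and server names are taken from the log paths above, and FlushAndRollExample is a made-up class name, not part of the test.

```java
// Minimal sketch: drive a flush, a compaction request and a WAL roll via Admin.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushAndRollExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table =
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
      admin.flush(table);    // flush memstores, like the FLUSH operation above
      admin.compact(table);  // request a compaction of the store files
      // Roll the WAL of one region server; the "host,port,startcode" string
      // below is copied from the WAL directory name in the log.
      admin.rollWALWriter(ServerName.valueOf("8fc3ff0a63e6,44715,1732250201012"));
    }
  }
}
```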
2024-11-22T04:37:22,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:37:22,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-22T04:37:22,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-22T04:37:22,640 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-22T04:37:22,642 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-22T04:37:22,642 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-22T04:37:22,794 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=44715 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14
2024-11-22T04:37:22,795 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
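The "Operation: FLUSH ... completed" and "Checking to see if procedure is done pid=13" entries are the asynchronous client waiting on the master's FlushTableProcedure. A hedged sketch of that client-side pattern with the async connection API follows; AsyncFlushExample is an illustrative name, not code from the test.

```java
// Minimal sketch: ask for a table flush through the async client and wait for
// the master-side flush procedure (pid=13 in the log above) to complete.
import java.util.concurrent.CompletableFuture;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class AsyncFlushExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (AsyncConnection conn =
             ConnectionFactory.createAsyncConnection(conf).get()) {
      AsyncAdmin admin = conn.getAdmin();
      CompletableFuture<Void> done = admin.flush(
          TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
      done.join(); // completes once the flush procedure finishes on the master
    }
  }
}
```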
2024-11-22T04:37:22,795 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing cd97978f44c6a4ee80f494afa91c1edc 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-22T04:37:22,799 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/abb84fc29d2d44b7a56b80650a7d3aef is 1080, key is row0000/info:/1732250242610/Put/seqid=0
2024-11-22T04:37:22,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741843_1019 (size=6033)
2024-11-22T04:37:22,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741843_1019 (size=6033)
2024-11-22T04:37:22,805 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/abb84fc29d2d44b7a56b80650a7d3aef
2024-11-22T04:37:22,812 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/abb84fc29d2d44b7a56b80650a7d3aef as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/abb84fc29d2d44b7a56b80650a7d3aef
2024-11-22T04:37:22,818 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/abb84fc29d2d44b7a56b80650a7d3aef, entries=1, sequenceid=18, filesize=5.9 K
2024-11-22T04:37:22,819 INFO [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 24ms, sequenceid=18, compaction requested=false
2024-11-22T04:37:22,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for cd97978f44c6a4ee80f494afa91c1edc:
2024-11-22T04:37:22,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.
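The flush above writes the new HFile under the region's .tmp directory and only then "commits" it by moving it into the info/ store directory, so readers never see a partially written file. Below is a small sketch of that write-to-temp-then-rename pattern using the plain Hadoop FileSystem API; the paths and class name are made up, and this is not the HRegionFileSystem implementation itself.

```java
// Minimal sketch: stage a file under .tmp, then move it into the live directory.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenRename {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/example/region/.tmp/info/newfile"); // staging location
    Path dst = new Path("/example/region/info/newfile");      // final store location
    fs.mkdirs(dst.getParent());

    try (FSDataOutputStream out = fs.create(tmp)) {
      out.writeUTF("example payload"); // write the complete file before publishing it
    }
    // On HDFS a file rename is a single NameNode operation, so the file appears
    // in the store directory only once it is fully written.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}
```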
2024-11-22T04:37:22,819 DEBUG [RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14
2024-11-22T04:37:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.HMaster(4169): Remote procedure done, pid=14
2024-11-22T04:37:22,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13
2024-11-22T04:37:22,824 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec
2024-11-22T04:37:22,827 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec
2024-11-22T04:37:22,897 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker!
2024-11-22T04:37:22,897 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore!
2024-11-22T04:37:22,924 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
	at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
	at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
	at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
	at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
	at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
	... 11 more
[The same "Failed invocation" WARN, with an identical stack trace, continues roughly once per second for both WAL files: at 04:37:22,934, 04:37:23,925, 04:37:23,934, 04:37:24,926, 04:37:24,935, 04:37:25,926, 04:37:25,936, 04:37:26,927 and 04:37:26,936.]
2024-11-22T04:37:27,495 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region cd97978f44c6a4ee80f494afa91c1edc, had cached 0 bytes from a total of 14329
2024-11-22T04:37:27,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
	... (stack trace identical to the one above; caused by java.io.IOException: Filesystem closed)
2024-11-22T04:37:27,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta
java.lang.reflect.InvocationTargetException: null
	at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
	at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
	at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:28,928 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:28,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:29,929 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:29,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:30,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:30,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:31,930 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:31,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:32,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41701 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-22T04:37:32,707 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-22T04:37:32,710 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C44715%2C1732250201012.1732250252710 2024-11-22T04:37:32,719 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,719 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,719 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,719 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,719 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,719 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250242611 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250252710 2024-11-22T04:37:32,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43667:43667),(127.0.0.1/127.0.0.1:46101:46101)] 2024-11-22T04:37:32,720 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250242611 is not closed yet, will try archiving it next time 2024-11-22T04:37:32,720 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/WALs/8fc3ff0a63e6,44715,1732250201012/8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411 to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs/8fc3ff0a63e6%2C44715%2C1732250201012.1732250232411 2024-11-22T04:37:32,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:37:32,720 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:37:32,720 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:37:32,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:32,720 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:32,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741842_1018 (size=2026) 2024-11-22T04:37:32,721 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): 
Shutting down HBase Cluster 2024-11-22T04:37:32,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741842_1018 (size=2026) 2024-11-22T04:37:32,721 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=503815883, stopped=false 2024-11-22T04:37:32,721 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,41701,1732250200850 2024-11-22T04:37:32,721 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-22T04:37:32,759 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:37:32,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:37:32,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:37:32,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:32,759 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:32,760 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:37:32,760 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:37:32,760 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:32,760 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,44715,1732250201012' ***** 2024-11-22T04:37:32,760 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:37:32,760 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:37:32,760 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:37:32,761 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:37:32,761 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:37:32,761 INFO [RS:0;8fc3ff0a63e6:44715 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:37:32,761 INFO [RS:0;8fc3ff0a63e6:44715 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:37:32,761 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(3091): Received CLOSE for cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:37:32,761 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:44715. 2024-11-22T04:37:32,762 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:37:32,762 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing cd97978f44c6a4ee80f494afa91c1edc, disabling compactions & flushes 2024-11-22T04:37:32,762 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:32,762 INFO 
[RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:37:32,762 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:37:32,762 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. after waiting 0 ms 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-22T04:37:32,762 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:37:32,762 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing cd97978f44c6a4ee80f494afa91c1edc 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-22T04:37:32,762 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-22T04:37:32,762 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1325): Online Regions={cd97978f44c6a4ee80f494afa91c1edc=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T04:37:32,763 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, cd97978f44c6a4ee80f494afa91c1edc 2024-11-22T04:37:32,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:37:32,763 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:37:32,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:37:32,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:37:32,763 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:37:32,763 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-22T04:37:32,768 DEBUG 
[RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/093764f3f0e84074a40c56873b3f03fc is 1080, key is row0001/info:/1732250252708/Put/seqid=0 2024-11-22T04:37:32,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741845_1021 (size=6033) 2024-11-22T04:37:32,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741845_1021 (size=6033) 2024-11-22T04:37:32,773 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/093764f3f0e84074a40c56873b3f03fc 2024-11-22T04:37:32,779 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/.tmp/info/093764f3f0e84074a40c56873b3f03fc as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/093764f3f0e84074a40c56873b3f03fc 2024-11-22T04:37:32,783 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/info/4f1f6a3112b44eef8a50cadc5bb371d6 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc./info:regioninfo/1732250202509/Put/seqid=0 2024-11-22T04:37:32,792 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/093764f3f0e84074a40c56873b3f03fc, entries=1, sequenceid=22, filesize=5.9 K 2024-11-22T04:37:32,793 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 31ms, sequenceid=22, compaction requested=true 2024-11-22T04:37:32,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741846_1022 (size=7308) 2024-11-22T04:37:32,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741846_1022 (size=7308) 2024-11-22T04:37:32,798 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at 
sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/info/4f1f6a3112b44eef8a50cadc5bb371d6 2024-11-22T04:37:32,800 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508] to archive 2024-11-22T04:37:32,801 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T04:37:32,803 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867 to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/362de752a29e4db4a2aa8a55f1855867 2024-11-22T04:37:32,804 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/2a2fac2ffdd2441db109fa6aaa2a6ce5 2024-11-22T04:37:32,806 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508 to hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/info/f860709150ff4091bcac3c737352b508 2024-11-22T04:37:32,806 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=8fc3ff0a63e6:41701 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T04:37:32,806 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [362de752a29e4db4a2aa8a55f1855867=6033, 2a2fac2ffdd2441db109fa6aaa2a6ce5=6033, f860709150ff4091bcac3c737352b508=6033] 2024-11-22T04:37:32,810 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/cd97978f44c6a4ee80f494afa91c1edc/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-22T04:37:32,811 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 2024-11-22T04:37:32,811 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for cd97978f44c6a4ee80f494afa91c1edc: Waiting for close lock at 1732250252762Running coprocessor pre-close hooks at 1732250252762Disabling compacts and flushes for region at 1732250252762Disabling writes for close at 1732250252762Obtaining lock to block concurrent updates at 1732250252762Preparing flush snapshotting stores in cd97978f44c6a4ee80f494afa91c1edc at 1732250252762Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732250252763 (+1 ms)Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. at 1732250252763Flushing cd97978f44c6a4ee80f494afa91c1edc/info: creating writer at 1732250252764 (+1 ms)Flushing cd97978f44c6a4ee80f494afa91c1edc/info: appending metadata at 1732250252767 (+3 ms)Flushing cd97978f44c6a4ee80f494afa91c1edc/info: closing flushed file at 1732250252767Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5a1ff32c: reopening flushed file at 1732250252778 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for cd97978f44c6a4ee80f494afa91c1edc in 31ms, sequenceid=22, compaction requested=true at 1732250252793 (+15 ms)Writing region close event to WAL at 1732250252807 (+14 ms)Running coprocessor post-close hooks at 1732250252811 (+4 ms)Closed at 1732250252811 2024-11-22T04:37:32,811 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732250202149.cd97978f44c6a4ee80f494afa91c1edc. 
2024-11-22T04:37:32,819 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/ns/6174a79023694d47a518e8f62f4bef3f is 43, key is default/ns:d/1732250202049/Put/seqid=0 2024-11-22T04:37:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741847_1023 (size=5153) 2024-11-22T04:37:32,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741847_1023 (size=5153) 2024-11-22T04:37:32,823 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/ns/6174a79023694d47a518e8f62f4bef3f 2024-11-22T04:37:32,843 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/table/c3a7b672e1d9431d99d7e598c7a3995c is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732250202519/Put/seqid=0 2024-11-22T04:37:32,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741848_1024 (size=5508) 2024-11-22T04:37:32,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741848_1024 (size=5508) 2024-11-22T04:37:32,848 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/table/c3a7b672e1d9431d99d7e598c7a3995c 2024-11-22T04:37:32,853 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/info/4f1f6a3112b44eef8a50cadc5bb371d6 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/info/4f1f6a3112b44eef8a50cadc5bb371d6 2024-11-22T04:37:32,859 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/info/4f1f6a3112b44eef8a50cadc5bb371d6, entries=10, sequenceid=11, filesize=7.1 K 2024-11-22T04:37:32,870 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/ns/6174a79023694d47a518e8f62f4bef3f as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/ns/6174a79023694d47a518e8f62f4bef3f 2024-11-22T04:37:32,877 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/ns/6174a79023694d47a518e8f62f4bef3f, entries=2, sequenceid=11, filesize=5.0 K 2024-11-22T04:37:32,878 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/.tmp/table/c3a7b672e1d9431d99d7e598c7a3995c as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/table/c3a7b672e1d9431d99d7e598c7a3995c 2024-11-22T04:37:32,884 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/table/c3a7b672e1d9431d99d7e598c7a3995c, entries=2, sequenceid=11, filesize=5.4 K 2024-11-22T04:37:32,885 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false 2024-11-22T04:37:32,889 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-22T04:37:32,890 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:37:32,890 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:37:32,890 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250252763Running coprocessor pre-close hooks at 1732250252763Disabling compacts and flushes for region at 1732250252763Disabling writes for close at 1732250252763Obtaining lock to block concurrent updates at 1732250252763Preparing flush snapshotting stores in 1588230740 at 1732250252763Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732250252763Flushing stores of hbase:meta,,1.1588230740 at 1732250252764 (+1 ms)Flushing 1588230740/info: creating writer at 1732250252764Flushing 1588230740/info: appending metadata at 1732250252783 (+19 ms)Flushing 1588230740/info: closing flushed file at 1732250252783Flushing 1588230740/ns: creating writer at 1732250252804 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732250252818 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732250252818Flushing 1588230740/table: creating writer at 1732250252827 (+9 ms)Flushing 1588230740/table: appending metadata at 1732250252842 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732250252842Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56b898a9: reopening flushed file at 1732250252852 (+10 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@292b4649: reopening flushed file at 1732250252859 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@52183905: reopening flushed file at 1732250252877 (+18 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=11, compaction requested=false at 1732250252885 (+8 ms)Writing region close event to WAL at 1732250252886 (+1 ms)Running coprocessor post-close hooks at 1732250252890 (+4 ms)Closed at 1732250252890 2024-11-22T04:37:32,890 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:37:32,931 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:32,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:32,963 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,44715,1732250201012; all regions closed. 
2024-11-22T04:37:32,963 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,963 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,963 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,963 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,963 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741834_1010 (size=3306) 2024-11-22T04:37:32,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741834_1010 (size=3306) 2024-11-22T04:37:32,967 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs 2024-11-22T04:37:32,968 INFO [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C44715%2C1732250201012.meta:.meta(num 1732250201963) 2024-11-22T04:37:32,968 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,968 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,968 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,968 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,968 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:32,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741844_1020 (size=1252) 2024-11-22T04:37:32,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741844_1020 (size=1252) 2024-11-22T04:37:32,973 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/oldWALs 2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C44715%2C1732250201012:(num 1732250252710) 2024-11-22T04:37:32,973 DEBUG [RS:0;8fc3ff0a63e6:44715 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:37:32,973 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:37:32,973 INFO [RS:0;8fc3ff0a63e6:44715 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44715 2024-11-22T04:37:32,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,44715,1732250201012 2024-11-22T04:37:32,980 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:37:32,980 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:37:32,981 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,44715,1732250201012] 2024-11-22T04:37:33,001 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,44715,1732250201012 already deleted, retry=false 2024-11-22T04:37:33,001 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,44715,1732250201012 expired; onlineServers=0 2024-11-22T04:37:33,001 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,41701,1732250200850' ***** 2024-11-22T04:37:33,001 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:37:33,001 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:37:33,001 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:37:33,001 DEBUG [M:0;8fc3ff0a63e6:41701 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:37:33,001 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-22T04:37:33,001 DEBUG [M:0;8fc3ff0a63e6:41701 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:37:33,001 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250201340 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250201340,5,FailOnTimeoutGroup] 2024-11-22T04:37:33,001 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250201341 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250201341,5,FailOnTimeoutGroup] 2024-11-22T04:37:33,002 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:37:33,002 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:37:33,002 DEBUG [M:0;8fc3ff0a63e6:41701 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:37:33,002 INFO [M:0;8fc3ff0a63e6:41701 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:37:33,002 INFO [M:0;8fc3ff0a63e6:41701 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:37:33,002 INFO [M:0;8fc3ff0a63e6:41701 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:37:33,002 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:37:33,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:37:33,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:33,012 DEBUG [M:0;8fc3ff0a63e6:41701 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-22T04:37:33,012 DEBUG [M:0;8fc3ff0a63e6:41701 {}] master.ActiveMasterManager(353): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-22T04:37:33,012 INFO [M:0;8fc3ff0a63e6:41701 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/.lastflushedseqids 2024-11-22T04:37:33,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741849_1025 (size=130) 2024-11-22T04:37:33,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741849_1025 (size=130) 2024-11-22T04:37:33,018 INFO [M:0;8fc3ff0a63e6:41701 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:37:33,019 INFO [M:0;8fc3ff0a63e6:41701 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:37:33,019 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 
2024-11-22T04:37:33,019 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:33,019 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:33,019 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:37:33,019 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:33,019 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-22T04:37:33,035 DEBUG [M:0;8fc3ff0a63e6:41701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c4855dc2429497a966da1fc36831da9 is 82, key is hbase:meta,,1/info:regioninfo/1732250201997/Put/seqid=0 2024-11-22T04:37:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741850_1026 (size=5672) 2024-11-22T04:37:33,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741850_1026 (size=5672) 2024-11-22T04:37:33,040 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c4855dc2429497a966da1fc36831da9 2024-11-22T04:37:33,061 DEBUG [M:0;8fc3ff0a63e6:41701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63dc9a644ecd40ca8bf852c2948d3c6c is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732250202525/Put/seqid=0 2024-11-22T04:37:33,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741851_1027 (size=7823) 2024-11-22T04:37:33,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741851_1027 (size=7823) 2024-11-22T04:37:33,066 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63dc9a644ecd40ca8bf852c2948d3c6c 2024-11-22T04:37:33,070 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63dc9a644ecd40ca8bf852c2948d3c6c 2024-11-22T04:37:33,088 DEBUG [M:0;8fc3ff0a63e6:41701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd39831749964e9495f0ee3731e9bcb6 is 69, key is 
8fc3ff0a63e6,44715,1732250201012/rs:state/1732250201467/Put/seqid=0 2024-11-22T04:37:33,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:37:33,091 INFO [RS:0;8fc3ff0a63e6:44715 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:37:33,091 INFO [RS:0;8fc3ff0a63e6:44715 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,44715,1732250201012; zookeeper connection closed. 2024-11-22T04:37:33,091 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44715-0x10160d466ac0001, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:37:33,091 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3265da1b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3265da1b 2024-11-22T04:37:33,091 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T04:37:33,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741852_1028 (size=5156) 2024-11-22T04:37:33,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741852_1028 (size=5156) 2024-11-22T04:37:33,093 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd39831749964e9495f0ee3731e9bcb6 2024-11-22T04:37:33,116 DEBUG [M:0;8fc3ff0a63e6:41701 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/be4e0984bf1c4cd9a883f50d9dbdb2f8 is 52, key is load_balancer_on/state:d/1732250202144/Put/seqid=0 2024-11-22T04:37:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741853_1029 (size=5056) 2024-11-22T04:37:33,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741853_1029 (size=5056) 2024-11-22T04:37:33,125 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/be4e0984bf1c4cd9a883f50d9dbdb2f8 2024-11-22T04:37:33,132 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/0c4855dc2429497a966da1fc36831da9 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0c4855dc2429497a966da1fc36831da9 2024-11-22T04:37:33,138 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/0c4855dc2429497a966da1fc36831da9, entries=8, sequenceid=121, filesize=5.5 K 2024-11-22T04:37:33,139 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/63dc9a644ecd40ca8bf852c2948d3c6c as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63dc9a644ecd40ca8bf852c2948d3c6c 2024-11-22T04:37:33,144 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 63dc9a644ecd40ca8bf852c2948d3c6c 2024-11-22T04:37:33,144 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/63dc9a644ecd40ca8bf852c2948d3c6c, entries=14, sequenceid=121, filesize=7.6 K 2024-11-22T04:37:33,146 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/bd39831749964e9495f0ee3731e9bcb6 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd39831749964e9495f0ee3731e9bcb6 2024-11-22T04:37:33,151 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/bd39831749964e9495f0ee3731e9bcb6, entries=1, sequenceid=121, filesize=5.0 K 2024-11-22T04:37:33,152 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/be4e0984bf1c4cd9a883f50d9dbdb2f8 as hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/be4e0984bf1c4cd9a883f50d9dbdb2f8 2024-11-22T04:37:33,157 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35351/user/jenkins/test-data/a047a081-2e6c-c7f9-ef79-69eb641fc5af/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/be4e0984bf1c4cd9a883f50d9dbdb2f8, entries=1, sequenceid=121, filesize=4.9 K 2024-11-22T04:37:33,159 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false 2024-11-22T04:37:33,164 INFO [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:37:33,164 DEBUG [M:0;8fc3ff0a63e6:41701 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250253019Disabling compacts and flushes for region at 1732250253019Disabling writes for close at 1732250253019Obtaining lock to block concurrent updates at 1732250253019Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250253019Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732250253019Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250253020 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250253020Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250253035 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250253035Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250253045 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250253060 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250253060Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250253070 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250253087 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250253087Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250253098 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250253116 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250253116Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@77f3da5f: reopening flushed file at 1732250253131 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@16ac330f: reopening flushed file at 1732250253138 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@65f965c6: reopening flushed file at 1732250253145 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1859b33f: reopening flushed file at 1732250253151 (+6 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 139ms, sequenceid=121, compaction requested=false at 1732250253159 (+8 ms)Writing region close event to WAL at 1732250253164 (+5 ms)Closed at 1732250253164 2024-11-22T04:37:33,164 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:33,164 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:33,164 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:33,164 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:33,165 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:37:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35945 is added to blk_1073741830_1006 (size=53035) 2024-11-22T04:37:33,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39075 is added to blk_1073741830_1006 (size=53035) 2024-11-22T04:37:33,167 INFO [M:0;8fc3ff0a63e6:41701 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-22T04:37:33,167 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:37:33,167 INFO [M:0;8fc3ff0a63e6:41701 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41701 2024-11-22T04:37:33,167 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:37:33,280 INFO [M:0;8fc3ff0a63e6:41701 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:37:33,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:37:33,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41701-0x10160d466ac0000, quorum=127.0.0.1:51821, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:37:33,282 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@101a8f4e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:37:33,283 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1523019d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:37:33,283 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:37:33,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f13d08d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:37:33,283 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@62aa92bd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,STOPPED} 2024-11-22T04:37:33,285 WARN [BP-570972098-172.17.0.2-1732250198543 heartbeating to localhost/127.0.0.1:35351 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:37:33,285 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:37:33,285 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:37:33,285 WARN [BP-570972098-172.17.0.2-1732250198543 heartbeating to localhost/127.0.0.1:35351 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-570972098-172.17.0.2-1732250198543 (Datanode Uuid 34a699fb-b146-4559-b8e9-09143ffca707) service to localhost/127.0.0.1:35351 2024-11-22T04:37:33,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data3/current/BP-570972098-172.17.0.2-1732250198543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:37:33,285 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data4/current/BP-570972098-172.17.0.2-1732250198543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:37:33,286 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:37:33,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@34534d9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:37:33,288 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1b7fc8f3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:37:33,288 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:37:33,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@750c8565{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:37:33,288 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f17b515{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,STOPPED} 2024-11-22T04:37:33,290 WARN [BP-570972098-172.17.0.2-1732250198543 heartbeating to localhost/127.0.0.1:35351 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:37:33,290 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:37:33,290 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:37:33,290 WARN [BP-570972098-172.17.0.2-1732250198543 heartbeating to localhost/127.0.0.1:35351 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-570972098-172.17.0.2-1732250198543 (Datanode Uuid df110a1d-1043-4639-84f1-0da6f890261d) service to localhost/127.0.0.1:35351 2024-11-22T04:37:33,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data1/current/BP-570972098-172.17.0.2-1732250198543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:37:33,291 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/cluster_6d578ef8-ef74-bdc1-cf72-7348f562c462/data/data2/current/BP-570972098-172.17.0.2-1732250198543 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:37:33,291 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:37:33,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b135886{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:37:33,297 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bfe295d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:37:33,297 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:37:33,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@618f0457{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:37:33,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bd1231f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir/,STOPPED} 2024-11-22T04:37:33,303 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:37:33,330 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:37:33,339 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=205 (was 179) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35351 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35351 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35351 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:35351 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:35351 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:35351 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:35351 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:35351 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=228 (was 265), ProcessCount=11 (was 11), AvailableMemoryMB=8310 (was 8462) 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=205, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=228, ProcessCount=11, AvailableMemoryMB=8310 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.log.dir so I do NOT create it in target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/f8097292-bce1-58f3-5d20-2406748dc7f3/hadoop.tmp.dir so I do NOT create it in target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf, deleteOnExit=true 2024-11-22T04:37:33,347 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/test.cache.data in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:37:33,348 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not 
a DistributedFileSystem. Skipping on block location reordering 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:37:33,348 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:37:33,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:37:33,362 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:37:33,497 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:37:33,734 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:37:33,737 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:37:33,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:37:33,738 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:37:33,739 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:37:33,739 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:37:33,739 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@64d2170c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:37:33,740 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@41a74ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:37:33,858 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@417c1a7a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/java.io.tmpdir/jetty-localhost-42331-hadoop-hdfs-3_4_1-tests_jar-_-any-11166198452237026316/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:37:33,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@78512cf7{HTTP/1.1, (http/1.1)}{localhost:42331} 2024-11-22T04:37:33,859 INFO [Time-limited test {}] server.Server(415): Started @251125ms 2024-11-22T04:37:33,872 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:37:33,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:33,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:34,121 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:37:34,123 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:37:34,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:37:34,124 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:37:34,124 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:37:34,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27d3c271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:37:34,125 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49490ce4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:37:34,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@49bf1df8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/java.io.tmpdir/jetty-localhost-38463-hadoop-hdfs-3_4_1-tests_jar-_-any-9617767361395445793/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:37:34,228 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7d4c2da4{HTTP/1.1, (http/1.1)}{localhost:38463} 2024-11-22T04:37:34,228 INFO [Time-limited test {}] server.Server(415): Started @251495ms 2024-11-22T04:37:34,229 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:37:34,254 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:37:34,256 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:37:34,257 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:37:34,257 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:37:34,257 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:37:34,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60ddaed5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:37:34,257 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@48123521{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:37:34,359 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5df314d1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/java.io.tmpdir/jetty-localhost-43327-hadoop-hdfs-3_4_1-tests_jar-_-any-14660899973300828313/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:37:34,360 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a100062{HTTP/1.1, (http/1.1)}{localhost:43327} 2024-11-22T04:37:34,360 INFO [Time-limited test {}] server.Server(415): Started @251626ms 2024-11-22T04:37:34,361 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:37:34,932 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:34,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:35,443 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data2/current/BP-1915570739-172.17.0.2-1732250253374/current, will proceed with Du for space computation calculation, 2024-11-22T04:37:35,443 WARN [Thread-1971 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data1/current/BP-1915570739-172.17.0.2-1732250253374/current, will proceed with Du for space computation calculation, 2024-11-22T04:37:35,463 WARN [Thread-1935 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:37:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bea46a6ebd62b6 with lease ID 0xcde9c23e6b1e4eef: Processing first storage report for DS-4227ec3d-d158-45f5-bef0-f177edefe993 from datanode DatanodeRegistration(127.0.0.1:34485, datanodeUuid=cfefd538-19df-479f-baf7-bb581da100ed, infoPort=41503, infoSecurePort=0, ipcPort=32933, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374) 2024-11-22T04:37:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bea46a6ebd62b6 with lease ID 0xcde9c23e6b1e4eef: from storage DS-4227ec3d-d158-45f5-bef0-f177edefe993 node DatanodeRegistration(127.0.0.1:34485, datanodeUuid=cfefd538-19df-479f-baf7-bb581da100ed, infoPort=41503, infoSecurePort=0, ipcPort=32933, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:37:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8bea46a6ebd62b6 with lease ID 0xcde9c23e6b1e4eef: Processing first storage report for DS-e2cc5ea4-7fd1-4f24-99f5-3e541a6544e6 from datanode DatanodeRegistration(127.0.0.1:34485, datanodeUuid=cfefd538-19df-479f-baf7-bb581da100ed, infoPort=41503, infoSecurePort=0, ipcPort=32933, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374) 2024-11-22T04:37:35,465 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8bea46a6ebd62b6 with lease ID 0xcde9c23e6b1e4eef: from storage DS-e2cc5ea4-7fd1-4f24-99f5-3e541a6544e6 node DatanodeRegistration(127.0.0.1:34485, datanodeUuid=cfefd538-19df-479f-baf7-bb581da100ed, infoPort=41503, infoSecurePort=0, ipcPort=32933, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:37:35,592 WARN [Thread-1982 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data3/current/BP-1915570739-172.17.0.2-1732250253374/current, will proceed with Du for space computation calculation, 2024-11-22T04:37:35,592 WARN [Thread-1983 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data4/current/BP-1915570739-172.17.0.2-1732250253374/current, will proceed with Du for space computation calculation, 2024-11-22T04:37:35,610 WARN [Thread-1958 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:37:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc51427c6bc83ba15 with lease ID 0xcde9c23e6b1e4ef0: Processing first storage report for DS-6ad73cbe-fb82-4dc9-b351-bbed140dc3ca from datanode DatanodeRegistration(127.0.0.1:41117, datanodeUuid=7db8585e-eb95-4561-af0c-229b4745743f, infoPort=36505, infoSecurePort=0, ipcPort=46317, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374) 2024-11-22T04:37:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc51427c6bc83ba15 with lease ID 0xcde9c23e6b1e4ef0: from storage DS-6ad73cbe-fb82-4dc9-b351-bbed140dc3ca node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=7db8585e-eb95-4561-af0c-229b4745743f, infoPort=36505, infoSecurePort=0, ipcPort=46317, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:37:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc51427c6bc83ba15 with lease ID 0xcde9c23e6b1e4ef0: Processing first storage report for DS-27dc4ba6-05f8-430d-9fbb-ecb99809f9de from datanode DatanodeRegistration(127.0.0.1:41117, datanodeUuid=7db8585e-eb95-4561-af0c-229b4745743f, infoPort=36505, infoSecurePort=0, ipcPort=46317, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374) 2024-11-22T04:37:35,612 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc51427c6bc83ba15 with lease ID 0xcde9c23e6b1e4ef0: from storage DS-27dc4ba6-05f8-430d-9fbb-ecb99809f9de node DatanodeRegistration(127.0.0.1:41117, datanodeUuid=7db8585e-eb95-4561-af0c-229b4745743f, infoPort=36505, infoSecurePort=0, ipcPort=46317, storageInfo=lv=-57;cid=testClusterID;nsid=1909123143;c=1732250253374), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:37:35,699 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174 2024-11-22T04:37:35,703 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/zookeeper_0, clientPort=60013, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, 
minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:37:35,705 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60013 2024-11-22T04:37:35,705 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,707 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:37:35,715 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:37:35,716 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b with version=8 2024-11-22T04:37:35,716 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:37:35,718 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:37:35,719 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:37:35,720 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41945 2024-11-22T04:37:35,722 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41945 connecting to ZooKeeper ensemble=127.0.0.1:60013 2024-11-22T04:37:35,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:419450x0, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper 
Event, type=None, state=SyncConnected, path=null 2024-11-22T04:37:35,833 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41945-0x10160d53d010000 connected 2024-11-22T04:37:35,933 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:35,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:35,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,945 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,948 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:37:35,948 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b, hbase.cluster.distributed=false 2024-11-22T04:37:35,950 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:37:35,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41945 2024-11-22T04:37:35,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41945 2024-11-22T04:37:35,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41945 2024-11-22T04:37:35,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41945 2024-11-22T04:37:35,952 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41945 2024-11-22T04:37:35,971 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:37:35,971 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:37:35,972 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45737 2024-11-22T04:37:35,973 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45737 connecting to ZooKeeper ensemble=127.0.0.1:60013 2024-11-22T04:37:35,974 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,976 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:35,984 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457370x0, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:37:35,985 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:457370x0, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:37:35,985 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45737-0x10160d53d010001 connected 2024-11-22T04:37:35,985 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:37:35,985 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:37:35,986 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:37:35,987 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:37:35,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45737 2024-11-22T04:37:35,987 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45737 2024-11-22T04:37:35,988 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45737 2024-11-22T04:37:35,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45737 2024-11-22T04:37:35,989 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45737 2024-11-22T04:37:36,001 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:41945 2024-11-22T04:37:36,002 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:37:36,013 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:37:36,013 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:37:36,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,027 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:37:36,027 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,41945,1732250255718 from backup master directory 2024-11-22T04:37:36,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:37:36,037 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:37:36,037 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:37:36,037 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,041 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/hbase.id] with ID: 64d2dbb3-c2b5-41e8-8d4d-c9218da7d931 2024-11-22T04:37:36,041 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/.tmp/hbase.id 2024-11-22T04:37:36,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:37:36,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:37:36,046 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/.tmp/hbase.id]:[hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/hbase.id] 2024-11-22T04:37:36,063 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:36,063 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:37:36,065 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-22T04:37:36,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:37:36,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:37:36,083 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:37:36,084 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:37:36,084 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:37:36,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:37:36,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:37:36,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store 2024-11-22T04:37:36,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:37:36,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:37:36,099 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:36,099 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:37:36,099 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:36,099 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:36,099 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:37:36,099 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:37:36,100 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
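
The MasterRegion bootstrap above builds the internal 'master:store' region from a descriptor with four column families (info, proc, rs, state) and then immediately closes it. That table is managed by the master itself rather than created through the client API, but the same column-family settings can be expressed with the public descriptor builders. The sketch below reproduces only the 'info' family from the log (ROW_INDEX_V1 encoding, ROWCOL bloom filter, 3 versions, 8 KB blocks, in-memory) against a hypothetical table name, as an illustration of how those attributes map onto the builder calls.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static TableDescriptor build() {
        // Column family mirroring the 'info' family printed in the MasterRegion record above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();

        // Hypothetical table name; 'master:store' itself is created internally, not by clients.
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo", "store_like"))
            .setColumnFamily(info)
            .build();
      }
    }
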
2024-11-22T04:37:36,100 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250256099Disabling compacts and flushes for region at 1732250256099Disabling writes for close at 1732250256099Writing region close event to WAL at 1732250256099Closed at 1732250256099 2024-11-22T04:37:36,100 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/.initializing 2024-11-22T04:37:36,101 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/WALs/8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,104 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C41945%2C1732250255718, suffix=, logDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/WALs/8fc3ff0a63e6,41945,1732250255718, archiveDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/oldWALs, maxLogs=10 2024-11-22T04:37:36,104 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C41945%2C1732250255718.1732250256104 2024-11-22T04:37:36,110 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/WALs/8fc3ff0a63e6,41945,1732250255718/8fc3ff0a63e6%2C41945%2C1732250255718.1732250256104 2024-11-22T04:37:36,111 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41503:41503),(127.0.0.1/127.0.0.1:36505:36505)] 2024-11-22T04:37:36,111 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:37:36,111 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:36,111 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,111 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,113 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,114 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:37:36,114 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:36,115 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:37:36,116 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,116 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:36,117 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:37:36,118 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,118 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:36,119 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,121 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:37:36,121 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,122 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:36,122 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,122 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,122 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,124 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,124 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,124 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:37:36,125 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:37:36,127 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:37:36,128 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874367, jitterRate=0.11181552708148956}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:37:36,128 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250256112Initializing all the Stores at 1732250256112Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250256112Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250256113 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250256113Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250256113Cleaning up temporary data from old regions at 1732250256124 (+11 ms)Region opened successfully at 1732250256128 (+4 ms) 2024-11-22T04:37:36,128 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:37:36,131 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34f8cef5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:37:36,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:37:36,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:37:36,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:37:36,132 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:37:36,133 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:37:36,133 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:37:36,133 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:37:36,135 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:37:36,136 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:37:36,142 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:37:36,142 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:37:36,143 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:37:36,152 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:37:36,153 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:37:36,154 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:37:36,163 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:37:36,164 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:37:36,173 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:37:36,176 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:37:36,184 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:37:36,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:37:36,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:37:36,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,195 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,195 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,41945,1732250255718, sessionid=0x10160d53d010000, setting cluster-up flag (Was=false) 2024-11-22T04:37:36,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,247 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:37:36,249 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,300 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:37:36,301 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:36,303 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:37:36,304 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:37:36,305 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:37:36,305 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:37:36,305 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,41945,1732250255718 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:37:36,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:37:36,308 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250286308 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:37:36,309 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:37:36,310 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:37:36,310 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:37:36,310 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
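
Several of the ZKUtil/ZKWatcher records a few entries back are ordinary ZooKeeper watch traffic: the master probes znodes such as /hbase/balancer and /hbase/switch/split (their absence is explicitly "not necessarily an error"), and both sessions then receive NodeCreated and NodeChildrenChanged callbacks once /hbase/running appears. A bare-bones sketch of the same exists-with-watch pattern against the quorum from this log, using the plain ZooKeeper client rather than HBase's ZKWatcher, follows.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class RunningNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        // Session against the quorum used by this mini-cluster test.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:60013", 30_000, event -> {
          // Connection-level events (SyncConnected etc.) arrive on the session watcher.
          System.out.println("session event: " + event.getState());
        });

        Watcher nodeWatcher = (WatchedEvent event) ->
            System.out.println("event " + event.getType() + " on " + event.getPath());

        // exists() registers the watcher whether or not the znode is there yet;
        // a null Stat simply means "not created", mirroring the records above.
        Stat stat = zk.exists("/hbase/running", nodeWatcher);
        System.out.println("/hbase/running " + (stat == null ? "absent" : "present"));

        Thread.sleep(5_000); // give the watch a moment to fire in this toy example
        zk.close();
      }
    }
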
2024-11-22T04:37:36,311 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:37:36,311 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:37:36,311 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:37:36,311 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,311 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:37:36,311 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:37:36,311 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:37:36,311 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250256311,5,FailOnTimeoutGroup] 2024-11-22T04:37:36,312 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250256311,5,FailOnTimeoutGroup] 2024-11-22T04:37:36,312 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,312 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:37:36,312 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,312 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:37:36,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:37:36,322 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:37:36,322 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b 2024-11-22T04:37:36,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:37:36,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:37:36,328 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:36,329 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:37:36,330 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:37:36,330 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:36,330 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:37:36,331 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:37:36,331 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:36,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:37:36,333 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:37:36,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:36,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:37:36,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:37:36,335 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:36,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:36,335 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:37:36,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740 2024-11-22T04:37:36,336 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740 2024-11-22T04:37:36,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:37:36,337 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:37:36,337 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-22T04:37:36,338 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:37:36,340 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:37:36,341 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=872624, jitterRate=0.10959938168525696}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:37:36,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250256328Initializing all the Stores at 1732250256329 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250256329Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250256329Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250256329Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250256329Cleaning up temporary data from old regions at 1732250256337 (+8 ms)Region opened successfully at 1732250256341 (+4 ms) 2024-11-22T04:37:36,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:37:36,341 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:37:36,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:37:36,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:37:36,341 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:37:36,342 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:37:36,342 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250256341Disabling compacts and flushes for region at 
1732250256341Disabling writes for close at 1732250256341Writing region close event to WAL at 1732250256342 (+1 ms)Closed at 1732250256342 2024-11-22T04:37:36,343 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:37:36,343 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:37:36,343 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:37:36,345 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:37:36,365 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:37:36,391 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(746): ClusterId : 64d2dbb3-c2b5-41e8-8d4d-c9218da7d931 2024-11-22T04:37:36,391 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:37:36,403 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:37:36,403 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:37:36,417 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:37:36,417 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@866b05c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:37:36,432 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:45737 2024-11-22T04:37:36,432 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:37:36,432 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:37:36,432 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(832): About to register with Master. 
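
At this point the region server has reported for duty to the master at port 41945 and is about to register its ephemeral znode under /hbase/rs. From a test or client perspective, talking to this mini cluster only requires pointing a Configuration at the same ZooKeeper quorum; a small sketch using the public client API follows. It is illustrative only and not what the test harness itself runs; immediately after startup the table listing is expected to be empty.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class MiniClusterClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Same quorum the master and region server registered against in this log.
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "60013");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // List user tables; right after startup this should print nothing.
          for (TableName tn : admin.listTableNames()) {
            System.out.println("table: " + tn);
          }
        }
      }
    }
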
2024-11-22T04:37:36,433 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,41945,1732250255718 with port=45737, startcode=1732250255970 2024-11-22T04:37:36,433 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:37:36,435 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38753, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:37:36,435 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,435 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,437 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b 2024-11-22T04:37:36,437 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41387 2024-11-22T04:37:36,437 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:37:36,447 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:37:36,448 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] zookeeper.ZKUtil(111): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,448 WARN [RS:0;8fc3ff0a63e6:45737 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:37:36,448 INFO [RS:0;8fc3ff0a63e6:45737 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:37:36,448 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,448 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,45737,1732250255970] 2024-11-22T04:37:36,451 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:37:36,452 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:37:36,452 INFO [RS:0;8fc3ff0a63e6:45737 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:37:36,452 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
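
The MemStoreFlusher and PressureAwareCompactionThroughputController lines above are both driven by ordinary site configuration. As a rough illustration only (the key names and the fraction-based semantics are stated to the best of my knowledge, and the values are assumptions chosen to mirror what this log prints), they could be tuned like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class FlushAndCompactionTuningSketch {
      public static Configuration build() {
        Configuration conf = HBaseConfiguration.create();

        // Global memstore limits are fractions of the region server heap; 0.40 of the heap,
        // with a low-water mark at 0.95 of that limit, is what yields figures like the
        // 880 M / 836 M reported above (exact numbers depend on the configured heap).
        conf.setDouble("hbase.regionserver.global.memstore.size", 0.40);
        conf.setDouble("hbase.regionserver.global.memstore.size.lower.limit", 0.95);

        // Compaction throughput bounds, in bytes/second; 100 MB and 50 MB mirror the
        // "higher bound" / "lower bound" values printed by the throughput controller.
        conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
        conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);

        return conf;
      }
    }
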
2024-11-22T04:37:36,452 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:37:36,453 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:37:36,453 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,453 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,453 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,453 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:37:36,454 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:37:36,454 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T04:37:36,454 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,455 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,455 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,455 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,455 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,45737,1732250255970-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:37:36,473 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:37:36,473 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,45737,1732250255970-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,473 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,473 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.Replication(171): 8fc3ff0a63e6,45737,1732250255970 started 2024-11-22T04:37:36,489 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:36,489 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,45737,1732250255970, RpcServer on 8fc3ff0a63e6/172.17.0.2:45737, sessionid=0x10160d53d010001 2024-11-22T04:37:36,489 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:37:36,489 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,489 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,45737,1732250255970' 2024-11-22T04:37:36,489 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,45737,1732250255970' 2024-11-22T04:37:36,490 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:37:36,490 DEBUG 
[RS:0;8fc3ff0a63e6:45737 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:37:36,491 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:37:36,491 INFO [RS:0;8fc3ff0a63e6:45737 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:37:36,491 INFO [RS:0;8fc3ff0a63e6:45737 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:37:36,515 WARN [8fc3ff0a63e6:41945 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:37:36,593 INFO [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C45737%2C1732250255970, suffix=, logDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970, archiveDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs, maxLogs=32 2024-11-22T04:37:36,594 INFO [RS:0;8fc3ff0a63e6:45737 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C45737%2C1732250255970.1732250256594 2024-11-22T04:37:36,606 INFO [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250256594 2024-11-22T04:37:36,607 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36505:36505),(127.0.0.1/127.0.0.1:41503:41503)] 2024-11-22T04:37:36,766 DEBUG [8fc3ff0a63e6:41945 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:37:36,766 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:36,768 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,45737,1732250255970, state=OPENING 2024-11-22T04:37:36,826 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:37:36,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:37:36,838 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:37:36,838 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:37:36,838 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:37:36,838 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,45737,1732250255970}]
2024-11-22T04:37:36,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T04:37:36,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-22T04:37:36,992 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false
2024-11-22T04:37:36,995 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44325, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService
2024-11-22T04:37:36,999 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740
2024-11-22T04:37:36,999 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider
2024-11-22T04:37:37,001 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C45737%2C1732250255970.meta, suffix=.meta, logDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970, archiveDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs, maxLogs=32
2024-11-22T04:37:37,002 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C45737%2C1732250255970.meta.1732250257001.meta
2024-11-22T04:37:37,007 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.meta.1732250257001.meta
2024-11-22T04:37:37,008 DEBUG
[RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36505:36505),(127.0.0.1/127.0.0.1:41503:41503)] 2024-11-22T04:37:37,009 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:37:37,009 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:37:37,009 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:37:37,009 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T04:37:37,009 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:37:37,010 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:37,010 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:37:37,010 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:37:37,011 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:37:37,012 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:37:37,012 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:37,013 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:37:37,014 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:37:37,014 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:37,014 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:37:37,015 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:37:37,015 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,015 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:37,016 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:37:37,016 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:37:37,016 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,017 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:37:37,017 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:37:37,018 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740 2024-11-22T04:37:37,019 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740 2024-11-22T04:37:37,020 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:37:37,020 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:37:37,021 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:37:37,022 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:37:37,022 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745860, jitterRate=-0.05159007012844086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:37:37,023 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:37:37,023 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250257010Writing region info on filesystem at 1732250257010Initializing all the Stores at 1732250257011 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250257011Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250257011Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250257011Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250257011Cleaning up temporary data from old regions at 1732250257020 (+9 ms)Running coprocessor post-open hooks at 1732250257023 (+3 ms)Region opened successfully at 1732250257023 2024-11-22T04:37:37,024 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250256992 2024-11-22T04:37:37,026 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:37:37,026 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:37:37,027 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:37,028 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,45737,1732250255970, state=OPEN 2024-11-22T04:37:37,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:37:37,066 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:37:37,066 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:37,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:37:37,066 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:37:37,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:37:37,070 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,45737,1732250255970 in 228 msec 2024-11-22T04:37:37,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:37:37,073 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 727 msec 2024-11-22T04:37:37,074 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:37:37,074 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:37:37,076 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:37:37,076 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=-1] 2024-11-22T04:37:37,076 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:37:37,078 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35875, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:37:37,085 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 779 msec 2024-11-22T04:37:37,085 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250257085, completionTime=-1 2024-11-22T04:37:37,085 INFO 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:37:37,085 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250317087 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250377087 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,087 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,088 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:41945, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,088 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,088 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,090 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.055sec 2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T04:37:37,092 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:37:37,093 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:37:37,093 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:37:37,095 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:37:37,095 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:37:37,095 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,41945,1732250255718-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:37:37,191 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e397dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:37:37,191 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,41945,-1 for getting cluster id 2024-11-22T04:37:37,191 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:37:37,193 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '64d2dbb3-c2b5-41e8-8d4d-c9218da7d931' 2024-11-22T04:37:37,193 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:37:37,193 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "64d2dbb3-c2b5-41e8-8d4d-c9218da7d931" 2024-11-22T04:37:37,193 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12396787, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:37:37,194 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,41945,-1] 2024-11-22T04:37:37,194 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:37:37,194 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:37:37,195 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41856, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:37:37,196 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5fead160, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:37:37,197 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:37:37,198 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=-1] 2024-11-22T04:37:37,198 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:37:37,199 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42978, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:37:37,201 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:37,201 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:37:37,205 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:37:37,205 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-22T04:37:37,206 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:37:37,206 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10dedffc 2024-11-22T04:37:37,206 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-22T04:37:37,208 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41872, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-22T04:37:37,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-22T04:37:37,208 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-22T04:37:37,209 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:37:37,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-22T04:37:37,211 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-22T04:37:37,212 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,212 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-22T04:37:37,213 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-22T04:37:37,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:37:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741835_1011 (size=381) 2024-11-22T04:37:37,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741835_1011 (size=381) 2024-11-22T04:37:37,221 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7187ba4ff0ce5bd266e84d41e7ea2488, NAME => 'TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b 2024-11-22T04:37:37,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741836_1012 (size=64) 2024-11-22T04:37:37,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741836_1012 (size=64) 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 7187ba4ff0ce5bd266e84d41e7ea2488, disabling compactions & flushes 2024-11-22T04:37:37,231 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. after waiting 0 ms 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:37,231 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:37,231 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7187ba4ff0ce5bd266e84d41e7ea2488: Waiting for close lock at 1732250257231Disabling compacts and flushes for region at 1732250257231Disabling writes for close at 1732250257231Writing region close event to WAL at 1732250257231Closed at 1732250257231 2024-11-22T04:37:37,232 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-22T04:37:37,233 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732250257232"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250257232"}]},"ts":"1732250257232"} 2024-11-22T04:37:37,235 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-22T04:37:37,236 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-22T04:37:37,236 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250257236"}]},"ts":"1732250257236"} 2024-11-22T04:37:37,238 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-22T04:37:37,238 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, ASSIGN}] 2024-11-22T04:37:37,240 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, ASSIGN 2024-11-22T04:37:37,241 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, ASSIGN; state=OFFLINE, location=8fc3ff0a63e6,45737,1732250255970; forceNewPlan=false, retain=false 2024-11-22T04:37:37,391 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7187ba4ff0ce5bd266e84d41e7ea2488, regionState=OPENING, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:37,394 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, ASSIGN because future has completed 2024-11-22T04:37:37,395 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970}] 2024-11-22T04:37:37,552 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
2024-11-22T04:37:37,552 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7187ba4ff0ce5bd266e84d41e7ea2488, NAME => 'TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:37:37,552 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,552 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:37,552 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,553 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,554 INFO [StoreOpener-7187ba4ff0ce5bd266e84d41e7ea2488-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,555 INFO [StoreOpener-7187ba4ff0ce5bd266e84d41e7ea2488-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7187ba4ff0ce5bd266e84d41e7ea2488 columnFamilyName info 2024-11-22T04:37:37,556 DEBUG [StoreOpener-7187ba4ff0ce5bd266e84d41e7ea2488-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:37,556 INFO [StoreOpener-7187ba4ff0ce5bd266e84d41e7ea2488-1 {}] regionserver.HStore(327): Store=7187ba4ff0ce5bd266e84d41e7ea2488/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:37,556 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,557 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,557 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,558 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,558 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,560 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,562 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:37:37,563 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7187ba4ff0ce5bd266e84d41e7ea2488; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879067, jitterRate=0.11779166758060455}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:37:37,563 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:37,564 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7187ba4ff0ce5bd266e84d41e7ea2488: Running coprocessor pre-open hook at 1732250257553Writing region info on filesystem at 1732250257553Initializing all the Stores at 1732250257553Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250257553Cleaning up temporary data from old regions at 1732250257558 (+5 ms)Running coprocessor post-open hooks at 1732250257563 (+5 ms)Region opened successfully at 1732250257564 (+1 ms) 2024-11-22T04:37:37,565 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., pid=6, masterSystemTime=1732250257548 2024-11-22T04:37:37,568 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
2024-11-22T04:37:37,568 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:37,569 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7187ba4ff0ce5bd266e84d41e7ea2488, regionState=OPEN, openSeqNum=2, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:37,571 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970 because future has completed 2024-11-22T04:37:37,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-22T04:37:37,575 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970 in 178 msec 2024-11-22T04:37:37,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-22T04:37:37,578 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, ASSIGN in 337 msec 2024-11-22T04:37:37,579 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-22T04:37:37,580 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732250257580"}]},"ts":"1732250257580"} 2024-11-22T04:37:37,582 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-22T04:37:37,583 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-22T04:37:37,585 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 374 msec 2024-11-22T04:37:37,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,811 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,812 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,813 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,835 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,839 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,840 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,842 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:37,934 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:37,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:38,347 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:37:38,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,348 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,349 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,350 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,372 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,373 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:38,935 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:38,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:39,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:39,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:39,997 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-22T04:37:39,997 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-22T04:37:39,998 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-22T04:37:40,936 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:40,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:41,937 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:41,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:42,451 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-22T04:37:42,452 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-22T04:37:42,938 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:42,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:43,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:43,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:44,939 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:44,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:45,501 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:37:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,502 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,503 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,504 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,537 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,538 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,539 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,545 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,546 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,550 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:45,940 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:45,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:46,941 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:46,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:47,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-22T04:37:47,237 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-22T04:37:47,237 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-22T04:37:47,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-22T04:37:47,240 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
2024-11-22T04:37:47,243 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=2] 2024-11-22T04:37:47,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:47,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:37:47,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b79fea9d9f8346b6aa75c8484394e330 is 1080, key is row0001/info:/1732250267245/Put/seqid=0 2024-11-22T04:37:47,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741837_1013 (size=12509) 2024-11-22T04:37:47,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741837_1013 (size=12509) 2024-11-22T04:37:47,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b79fea9d9f8346b6aa75c8484394e330 2024-11-22T04:37:47,292 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b79fea9d9f8346b6aa75c8484394e330 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330 2024-11-22T04:37:47,298 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330, entries=7, sequenceid=11, filesize=12.2 K 2024-11-22T04:37:47,299 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 40ms, sequenceid=11, compaction requested=false 2024-11-22T04:37:47,299 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:47,300 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:47,301 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-22T04:37:47,305 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/636ea62347ed421f9d8779a7da661cb1 is 1080, key is row0008/info:/1732250267261/Put/seqid=0 2024-11-22T04:37:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741838_1014 (size=25453) 2024-11-22T04:37:47,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741838_1014 (size=25453) 2024-11-22T04:37:47,312 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/636ea62347ed421f9d8779a7da661cb1 2024-11-22T04:37:47,317 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/636ea62347ed421f9d8779a7da661cb1 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 2024-11-22T04:37:47,323 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1, entries=19, sequenceid=33, filesize=24.9 K 2024-11-22T04:37:47,324 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 23ms, sequenceid=33, compaction requested=false 2024-11-22T04:37:47,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:47,324 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-22T04:37:47,324 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:47,324 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 because midkey is the same as first or last row 2024-11-22T04:37:47,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:47,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:48,942 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:48,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:49,315 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:49,316 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:37:49,321 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/be7ae7f095eb490bb6f12eb65633fe6e is 1080, key is row0027/info:/1732250267302/Put/seqid=0 2024-11-22T04:37:49,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741839_1015 (size=12509) 2024-11-22T04:37:49,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741839_1015 (size=12509) 2024-11-22T04:37:49,327 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/be7ae7f095eb490bb6f12eb65633fe6e 2024-11-22T04:37:49,333 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/be7ae7f095eb490bb6f12eb65633fe6e as 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e 2024-11-22T04:37:49,339 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e, entries=7, sequenceid=43, filesize=12.2 K 2024-11-22T04:37:49,340 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 25ms, sequenceid=43, compaction requested=true 2024-11-22T04:37:49,340 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:49,340 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,340 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:49,340 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 because midkey is the same as first or last row 2024-11-22T04:37:49,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7187ba4ff0ce5bd266e84d41e7ea2488:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:37:49,341 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:49,341 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:37:49,341 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T04:37:49,342 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:37:49,342 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 7187ba4ff0ce5bd266e84d41e7ea2488/info is initiating minor compaction (all files) 2024-11-22T04:37:49,342 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7187ba4ff0ce5bd266e84d41e7ea2488/info in TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
2024-11-22T04:37:49,342 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp, totalSize=49.3 K 2024-11-22T04:37:49,343 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting b79fea9d9f8346b6aa75c8484394e330, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732250267245 2024-11-22T04:37:49,343 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 636ea62347ed421f9d8779a7da661cb1, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1732250267261 2024-11-22T04:37:49,344 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting be7ae7f095eb490bb6f12eb65633fe6e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732250267302 2024-11-22T04:37:49,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/e0816992566a491bb0ef9f4236a03e74 is 1080, key is row0034/info:/1732250269317/Put/seqid=0 2024-11-22T04:37:49,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741840_1016 (size=16817) 2024-11-22T04:37:49,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741840_1016 (size=16817) 2024-11-22T04:37:49,354 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/e0816992566a491bb0ef9f4236a03e74 2024-11-22T04:37:49,361 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/e0816992566a491bb0ef9f4236a03e74 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74 2024-11-22T04:37:49,370 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74, entries=11, sequenceid=57, filesize=16.4 K 2024-11-22T04:37:49,371 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 30ms, sequenceid=57, compaction requested=false 2024-11-22T04:37:49,371 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:49,371 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,371 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,371 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 because midkey is the same as first or last row 2024-11-22T04:37:49,374 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:49,374 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T04:37:49,377 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7187ba4ff0ce5bd266e84d41e7ea2488#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:37:49,378 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/c42391e21ba44e12bfcc44027f17ac2a is 1080, key is row0001/info:/1732250267245/Put/seqid=0 2024-11-22T04:37:49,380 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/3e447acaad9f433ab8c0ae418683b0a8 is 1080, key is row0045/info:/1732250269342/Put/seqid=0 2024-11-22T04:37:49,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741841_1017 (size=40670) 2024-11-22T04:37:49,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741841_1017 (size=40670) 2024-11-22T04:37:49,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741842_1018 (size=18987) 2024-11-22T04:37:49,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741842_1018 (size=18987) 2024-11-22T04:37:49,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=73 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/3e447acaad9f433ab8c0ae418683b0a8 2024-11-22T04:37:49,399 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/3e447acaad9f433ab8c0ae418683b0a8 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8 2024-11-22T04:37:49,400 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/c42391e21ba44e12bfcc44027f17ac2a as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a 2024-11-22T04:37:49,405 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8, entries=13, sequenceid=73, filesize=18.5 K 2024-11-22T04:37:49,406 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=7.36 KB/7532 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 32ms, sequenceid=73, compaction requested=false 
2024-11-22T04:37:49,406 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:49,406 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,406 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7187ba4ff0ce5bd266e84d41e7ea2488/info of 7187ba4ff0ce5bd266e84d41e7ea2488 into c42391e21ba44e12bfcc44027f17ac2a(size=39.7 K), total size for store is 74.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:37:49,406 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,406 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:49,406 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:49,406 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., storeName=7187ba4ff0ce5bd266e84d41e7ea2488/info, priority=13, startTime=1732250269340; duration=0sec 2024-11-22T04:37:49,406 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,406 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,406 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=74.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:49,407 DEBUG 
[RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:49,407 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7187ba4ff0ce5bd266e84d41e7ea2488:info 2024-11-22T04:37:49,943 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:49,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:50,944 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:50,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:51,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-22T04:37:51,399 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b92471536c0f4eea96c4ccbe3542c4cc is 1080, key is row0058/info:/1732250269376/Put/seqid=0 2024-11-22T04:37:51,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741843_1019 (size=13586) 2024-11-22T04:37:51,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741843_1019 (size=13586) 2024-11-22T04:37:51,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=85 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b92471536c0f4eea96c4ccbe3542c4cc 2024-11-22T04:37:51,413 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/b92471536c0f4eea96c4ccbe3542c4cc as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc 2024-11-22T04:37:51,419 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc, entries=8, sequenceid=85, filesize=13.3 K 2024-11-22T04:37:51,420 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8608, heapSize ~9.23 KB/9456, currentSize=10.51 KB/10760 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 25ms, sequenceid=85, compaction requested=true 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=87.9 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7187ba4ff0ce5bd266e84d41e7ea2488:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-22T04:37:51,420 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:51,420 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T04:37:51,421 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 4 files of size 90060 starting at candidate #0 after considering 3 permutations with 3 in ratio 2024-11-22T04:37:51,422 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 7187ba4ff0ce5bd266e84d41e7ea2488/info is initiating minor compaction (all files) 2024-11-22T04:37:51,422 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7187ba4ff0ce5bd266e84d41e7ea2488/info in TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:51,422 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,422 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp, totalSize=87.9 K 2024-11-22T04:37:51,422 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T04:37:51,422 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting c42391e21ba44e12bfcc44027f17ac2a, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732250267245 2024-11-22T04:37:51,423 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting e0816992566a491bb0ef9f4236a03e74, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732250269317 2024-11-22T04:37:51,423 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3e447acaad9f433ab8c0ae418683b0a8, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=73, earliestPutTs=1732250269342 2024-11-22T04:37:51,424 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting b92471536c0f4eea96c4ccbe3542c4cc, keycount=8, 
bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732250269376 2024-11-22T04:37:51,426 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/70a8b54cef9a437aad446d6648063993 is 1080, key is row0066/info:/1732250271396/Put/seqid=0 2024-11-22T04:37:51,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741844_1020 (size=17894) 2024-11-22T04:37:51,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741844_1020 (size=17894) 2024-11-22T04:37:51,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/70a8b54cef9a437aad446d6648063993 2024-11-22T04:37:51,442 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7187ba4ff0ce5bd266e84d41e7ea2488#info#compaction#63 average throughput is 22.23 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:37:51,443 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/9fad09bd59d8439f8532e19718fe7e77 is 1080, key is row0001/info:/1732250267245/Put/seqid=0 2024-11-22T04:37:51,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/70a8b54cef9a437aad446d6648063993 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/70a8b54cef9a437aad446d6648063993 2024-11-22T04:37:51,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741845_1021 (size=75412) 2024-11-22T04:37:51,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741845_1021 (size=75412) 2024-11-22T04:37:51,450 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/70a8b54cef9a437aad446d6648063993, entries=12, sequenceid=100, filesize=17.5 K 2024-11-22T04:37:51,451 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=11.56 KB/11836 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 29ms, sequenceid=100, compaction requested=false 2024-11-22T04:37:51,451 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:51,451 DEBUG 
[MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=105.4 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,451 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,452 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a because midkey is the same as first or last row 2024-11-22T04:37:51,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-22T04:37:51,455 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/9fad09bd59d8439f8532e19718fe7e77 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77 2024-11-22T04:37:51,458 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/792489f2194044c590986d2f54b3a863 is 1080, key is row0078/info:/1732250271424/Put/seqid=0 2024-11-22T04:37:51,463 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 7187ba4ff0ce5bd266e84d41e7ea2488/info of 7187ba4ff0ce5bd266e84d41e7ea2488 into 9fad09bd59d8439f8532e19718fe7e77(size=73.6 K), total size for store is 91.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:51,463 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., storeName=7187ba4ff0ce5bd266e84d41e7ea2488/info, priority=12, startTime=1732250271420; duration=0sec 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=91.1 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,463 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741846_1022 (size=18987) 2024-11-22T04:37:51,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741846_1022 (size=18987) 2024-11-22T04:37:51,465 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=116 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/792489f2194044c590986d2f54b3a863 2024-11-22T04:37:51,466 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:51,466 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:51,466 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7187ba4ff0ce5bd266e84d41e7ea2488:info 2024-11-22T04:37:51,468 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] assignment.AssignmentManager(1363): Split request from 8fc3ff0a63e6,45737,1732250255970, parent={ENCODED => 7187ba4ff0ce5bd266e84d41e7ea2488, NAME => 'TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T04:37:51,470 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/792489f2194044c590986d2f54b3a863 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/792489f2194044c590986d2f54b3a863 2024-11-22T04:37:51,473 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:51,475 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/792489f2194044c590986d2f54b3a863, entries=13, sequenceid=116, filesize=18.5 K 2024-11-22T04:37:51,476 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=6.30 KB/6456 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 22ms, sequenceid=116, compaction requested=true 2024-11-22T04:37:51,476 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7187ba4ff0ce5bd266e84d41e7ea2488: 2024-11-22T04:37:51,476 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,476 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,476 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.7 K, sizeToCheck=16.0 K 2024-11-22T04:37:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-22T04:37:51,477 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=1 2024-11-22T04:37:51,477 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=46810a06844ef014a5c2001af60a2c28, daughterB=243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:51,478 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=46810a06844ef014a5c2001af60a2c28, daughterB=243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:51,478 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, 
parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=46810a06844ef014a5c2001af60a2c28, daughterB=243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:51,478 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=46810a06844ef014a5c2001af60a2c28, daughterB=243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:51,479 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] assignment.AssignmentManager(1363): Split request from 8fc3ff0a63e6,45737,1732250255970, parent={ENCODED => 7187ba4ff0ce5bd266e84d41e7ea2488, NAME => 'TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-22T04:37:51,480 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=SPLITTING, location=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:51,481 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] procedure2.ProcedureExecutor(1139): Stored pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 2024-11-22T04:37:51,481 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(842): Waiting on xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 held by pid=7 2024-11-22T04:37:51,485 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(162): LOCK_EVENT_WAIT SchemaLocking[serverLocks={},namespaceLocks={hbase=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},tableLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},regionLocks={},peerLocks={},metaLocks={hbase:meta=LockAndQueue[exclusiveLock=false,sharedLockCount=0,waitingProcCount=0]},globalLocks={}] 2024-11-22T04:37:51,486 DEBUG [PEWorker-3 {}] procedure2.ProcedureExecutor(1511): LOCK_EVENT_WAIT pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 2024-11-22T04:37:51,490 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, UNASSIGN}] 2024-11-22T04:37:51,491 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, UNASSIGN 2024-11-22T04:37:51,493 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7187ba4ff0ce5bd266e84d41e7ea2488, regionState=CLOSING, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:51,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 
{}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, UNASSIGN because future has completed 2024-11-22T04:37:51,496 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-22T04:37:51,496 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970}] 2024-11-22T04:37:51,654 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(122): Close 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,654 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-22T04:37:51,655 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1722): Closing 7187ba4ff0ce5bd266e84d41e7ea2488, disabling compactions & flushes 2024-11-22T04:37:51,655 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:51,655 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:51,655 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. after waiting 0 ms 2024-11-22T04:37:51,655 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
2024-11-22T04:37:51,655 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(2902): Flushing 7187ba4ff0ce5bd266e84d41e7ea2488 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-22T04:37:51,660 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/dc86849cdedc412e97161eb96fe9a813 is 1080, key is row0091/info:/1732250271455/Put/seqid=0 2024-11-22T04:37:51,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741847_1023 (size=11424) 2024-11-22T04:37:51,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741847_1023 (size=11424) 2024-11-22T04:37:51,667 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=126 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/dc86849cdedc412e97161eb96fe9a813 2024-11-22T04:37:51,673 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/.tmp/info/dc86849cdedc412e97161eb96fe9a813 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/dc86849cdedc412e97161eb96fe9a813 2024-11-22T04:37:51,679 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/dc86849cdedc412e97161eb96fe9a813, entries=6, sequenceid=126, filesize=11.2 K 2024-11-22T04:37:51,680 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 25ms, sequenceid=126, compaction requested=true 2024-11-22T04:37:51,681 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a, 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc] to archive 2024-11-22T04:37:51,682 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T04:37:51,684 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b79fea9d9f8346b6aa75c8484394e330 2024-11-22T04:37:51,685 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/636ea62347ed421f9d8779a7da661cb1 2024-11-22T04:37:51,686 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/c42391e21ba44e12bfcc44027f17ac2a 2024-11-22T04:37:51,687 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/be7ae7f095eb490bb6f12eb65633fe6e 2024-11-22T04:37:51,688 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/e0816992566a491bb0ef9f4236a03e74 2024-11-22T04:37:51,690 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/3e447acaad9f433ab8c0ae418683b0a8 2024-11-22T04:37:51,691 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/b92471536c0f4eea96c4ccbe3542c4cc 2024-11-22T04:37:51,697 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=1 2024-11-22T04:37:51,697 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 2024-11-22T04:37:51,697 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] regionserver.HRegion(1676): Region close journal for 7187ba4ff0ce5bd266e84d41e7ea2488: Waiting for close lock at 1732250271655Running coprocessor pre-close hooks at 1732250271655Disabling compacts and flushes for region at 1732250271655Disabling writes for close at 1732250271655Obtaining lock to block concurrent updates at 1732250271655Preparing flush snapshotting stores in 7187ba4ff0ce5bd266e84d41e7ea2488 at 1732250271655Finished memstore snapshotting TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., syncing WAL and waiting on mvcc, flushsize=dataSize=6456, getHeapSize=7152, getOffHeapSize=0, getCellsCount=6 at 1732250271655Flushing stores of TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
at 1732250271656 (+1 ms)Flushing 7187ba4ff0ce5bd266e84d41e7ea2488/info: creating writer at 1732250271656Flushing 7187ba4ff0ce5bd266e84d41e7ea2488/info: appending metadata at 1732250271660 (+4 ms)Flushing 7187ba4ff0ce5bd266e84d41e7ea2488/info: closing flushed file at 1732250271660Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@49430e80: reopening flushed file at 1732250271672 (+12 ms)Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 7187ba4ff0ce5bd266e84d41e7ea2488 in 25ms, sequenceid=126, compaction requested=true at 1732250271680 (+8 ms)Writing region close event to WAL at 1732250271693 (+13 ms)Running coprocessor post-close hooks at 1732250271697 (+4 ms)Closed at 1732250271697 2024-11-22T04:37:51,700 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION, pid=10}] handler.UnassignRegionHandler(157): Closed 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,700 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=7187ba4ff0ce5bd266e84d41e7ea2488, regionState=CLOSED 2024-11-22T04:37:51,702 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=9, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970 because future has completed 2024-11-22T04:37:51,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-22T04:37:51,706 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; CloseRegionProcedure 7187ba4ff0ce5bd266e84d41e7ea2488, server=8fc3ff0a63e6,45737,1732250255970 in 208 msec 2024-11-22T04:37:51,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=7 2024-11-22T04:37:51,709 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=7187ba4ff0ce5bd266e84d41e7ea2488, UNASSIGN in 216 msec 2024-11-22T04:37:51,717 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:51,722 INFO [PEWorker-5 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 4 storefiles, region=7187ba4ff0ce5bd266e84d41e7ea2488, threads=4 2024-11-22T04:37:51,724 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/792489f2194044c590986d2f54b3a863 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,724 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/70a8b54cef9a437aad446d6648063993 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,724 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,724 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/dc86849cdedc412e97161eb96fe9a813 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,734 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/70a8b54cef9a437aad446d6648063993, top=true 2024-11-22T04:37:51,737 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/792489f2194044c590986d2f54b3a863, top=true 2024-11-22T04:37:51,741 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993 for child: 243e7a44d431d77d0a3c663b5e11c6ce, parent: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,741 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/70a8b54cef9a437aad446d6648063993 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,743 DEBUG [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/dc86849cdedc412e97161eb96fe9a813, top=true 2024-11-22T04:37:51,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741848_1024 (size=27) 2024-11-22T04:37:51,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741848_1024 (size=27) 2024-11-22T04:37:51,746 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863 for child: 243e7a44d431d77d0a3c663b5e11c6ce, parent: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,746 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/792489f2194044c590986d2f54b3a863 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,753 INFO [StoreFileSplitter-pool-3 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813 for child: 243e7a44d431d77d0a3c663b5e11c6ce, parent: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,753 DEBUG [StoreFileSplitter-pool-3 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/dc86849cdedc412e97161eb96fe9a813 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741849_1025 (size=27) 2024-11-22T04:37:51,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741849_1025 (size=27) 2024-11-22T04:37:51,762 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77 for region: 7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:37:51,765 DEBUG [PEWorker-5 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 7187ba4ff0ce5bd266e84d41e7ea2488 Daughter A: [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488] storefiles, Daughter B: [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813] storefiles. 
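In the split output above, the storefile that straddles the split key appears in each daughter as a reference named <hfile>.<parentEncodedRegion>, while files lying entirely on one side of the key become link files named <table>=<parentEncodedRegion>-<hfile>. A small sketch that reconstructs those names as they appear in the log; the helper names are hypothetical, HBase builds these paths internally (HRegionFileSystem / HFileLink):

    // Illustrative reconstruction of the daughter-region file names seen in the split log.
    public class DaughterFileNames {
        // Reference file: the daughter points back into the parent's HFile (top or bottom half).
        static String referenceName(String hfile, String parentEncodedRegion) {
            return hfile + "." + parentEncodedRegion;
        }

        // Link file: a file wholly owned by one daughter is exposed through an HFileLink-style name.
        static String linkName(String table, String parentEncodedRegion, String hfile) {
            return table + "=" + parentEncodedRegion + "-" + hfile;
        }

        public static void main(String[] args) {
            String parent = "7187ba4ff0ce5bd266e84d41e7ea2488";
            System.out.println(referenceName("9fad09bd59d8439f8532e19718fe7e77", parent));
            // -> 9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488
            System.out.println(linkName("TestLogRolling-testLogRolling", parent, "70a8b54cef9a437aad446d6648063993"));
            // -> TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993
        }
    }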
2024-11-22T04:37:51,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741850_1026 (size=71) 2024-11-22T04:37:51,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741850_1026 (size=71) 2024-11-22T04:37:51,775 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:51,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741851_1027 (size=71) 2024-11-22T04:37:51,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741851_1027 (size=71) 2024-11-22T04:37:51,809 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:51,822 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-22T04:37:51,825 DEBUG [PEWorker-5 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/recovered.edits/129.seqid, newMaxSeqId=129, maxSeqId=-1 2024-11-22T04:37:51,827 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732250271827"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732250271827"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732250271827"}]},"ts":"1732250271827"} 2024-11-22T04:37:51,828 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732250271827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250271827"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732250271827"}]},"ts":"1732250271827"} 2024-11-22T04:37:51,828 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732250271827"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732250271827"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732250271827"}]},"ts":"1732250271827"} 2024-11-22T04:37:51,847 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28, ASSIGN}, {pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=243e7a44d431d77d0a3c663b5e11c6ce, ASSIGN}] 2024-11-22T04:37:51,849 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28, ASSIGN 2024-11-22T04:37:51,849 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=243e7a44d431d77d0a3c663b5e11c6ce, ASSIGN 2024-11-22T04:37:51,850 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=243e7a44d431d77d0a3c663b5e11c6ce, ASSIGN; state=SPLITTING_NEW, location=8fc3ff0a63e6,45737,1732250255970; forceNewPlan=false, retain=false 2024-11-22T04:37:51,850 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28, ASSIGN; state=SPLITTING_NEW, location=8fc3ff0a63e6,45737,1732250255970; forceNewPlan=false, retain=false 2024-11-22T04:37:51,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:51,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:52,001 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=243e7a44d431d77d0a3c663b5e11c6ce, regionState=OPENING, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:52,002 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=46810a06844ef014a5c2001af60a2c28, regionState=OPENING, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:52,004 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=243e7a44d431d77d0a3c663b5e11c6ce, ASSIGN because future has completed 2024-11-22T04:37:52,005 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970}] 2024-11-22T04:37:52,007 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28, ASSIGN because future has completed 2024-11-22T04:37:52,008 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46810a06844ef014a5c2001af60a2c28, server=8fc3ff0a63e6,45737,1732250255970}] 2024-11-22T04:37:52,162 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 
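The hbase:meta updates recorded above write a three-column row for the parent region (regioninfo plus splitA/splitB pointers to the daughters) and a row for each daughter. A hedged sketch of assembling the parent row with the client Put API; the byte values are placeholders for the serialized RegionInfo payloads that RegionStateStore produces internally:

    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.util.Bytes;

    // Sketch only: mirrors the three-column parent row logged above.
    public class MetaSplitRowSketch {
        static Put parentRow(String parentRegionName,
                             byte[] parentRegionInfo, byte[] daughterAInfo, byte[] daughterBInfo) {
            byte[] info = Bytes.toBytes("info");
            Put put = new Put(Bytes.toBytes(parentRegionName));
            put.addColumn(info, Bytes.toBytes("regioninfo"), parentRegionInfo);
            put.addColumn(info, Bytes.toBytes("splitA"), daughterAInfo);
            put.addColumn(info, Bytes.toBytes("splitB"), daughterBInfo);
            return put;
        }

        public static void main(String[] args) {
            // Placeholder lengths match the vlen values in the logged Put (63, 70, 70).
            Put p = parentRow("TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.",
                    new byte[63], new byte[70], new byte[70]);
            System.out.println(p);
        }
    }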
2024-11-22T04:37:52,163 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7752): Opening region: {ENCODED => 46810a06844ef014a5c2001af60a2c28, NAME => 'TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-22T04:37:52,163 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,163 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:52,163 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7794): checking encryption for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,163 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(7797): checking classloading for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,164 INFO [StoreOpener-46810a06844ef014a5c2001af60a2c28-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,165 INFO [StoreOpener-46810a06844ef014a5c2001af60a2c28-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 46810a06844ef014a5c2001af60a2c28 columnFamilyName info 2024-11-22T04:37:52,165 DEBUG [StoreOpener-46810a06844ef014a5c2001af60a2c28-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:52,177 DEBUG [StoreOpener-46810a06844ef014a5c2001af60a2c28-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-bottom 2024-11-22T04:37:52,178 INFO [StoreOpener-46810a06844ef014a5c2001af60a2c28-1 {}] regionserver.HStore(327): Store=46810a06844ef014a5c2001af60a2c28/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:52,178 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1038): replaying wal for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,179 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,180 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,180 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1048): stopping wal replay for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,180 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1060): Cleaning up temporary data for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,182 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1093): writing seq id for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,183 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1114): Opened 46810a06844ef014a5c2001af60a2c28; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=703483, jitterRate=-0.10547634959220886}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:37:52,183 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:37:52,183 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegion(1006): Region open journal for 46810a06844ef014a5c2001af60a2c28: Running coprocessor pre-open hook at 1732250272163Writing region info on filesystem at 1732250272163Initializing all the Stores at 1732250272164 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250272164Cleaning up temporary data from old regions at 1732250272180 (+16 ms)Running coprocessor post-open hooks at 1732250272183 (+3 ms)Region opened successfully at 1732250272183 2024-11-22T04:37:52,238 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28., pid=14, masterSystemTime=1732250272159 2024-11-22T04:37:52,239 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(403): Add compact mark for store 
46810a06844ef014a5c2001af60a2c28:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:37:52,239 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-22T04:37:52,239 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:52,240 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:37:52,240 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 46810a06844ef014a5c2001af60a2c28/info is initiating minor compaction (all files) 2024-11-22T04:37:52,240 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 46810a06844ef014a5c2001af60a2c28/info in TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:37:52,240 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-bottom] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/.tmp, totalSize=73.6 K 2024-11-22T04:37:52,241 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=85, earliestPutTs=1732250267245 2024-11-22T04:37:52,241 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:37:52,241 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=14}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:37:52,242 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 
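The SteppingSplitPolicy line logged for this daughter (desiredMaxFileSize=703483, jitterRate=-0.10547634959220886), and the corresponding line for the second daughter later in this log, are both consistent with a common base max file size of 786432 bytes (768 KB, presumably the test's configured split size) adjusted by the jitter rate with a truncating cast. The base value is inferred from the logged numbers, not stated in the log:

    // Reproduces the two logged desiredMaxFileSize values from an inferred 768 KB base.
    public class JitteredSplitSize {
        static long jittered(long baseMaxFileSize, double jitterRate) {
            // Truncating cast: this is what makes the arithmetic match the logged values exactly.
            return baseMaxFileSize + (long) (baseMaxFileSize * jitterRate);
        }

        public static void main(String[] args) {
            System.out.println(jittered(786_432, -0.10547634959220886)); // 703483 (this daughter)
            System.out.println(jittered(786_432,  0.0042243897914886475)); // 789754 (second daughter)
        }
    }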
2024-11-22T04:37:52,242 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 243e7a44d431d77d0a3c663b5e11c6ce, NAME => 'TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-22T04:37:52,242 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,242 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:37:52,242 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,242 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=46810a06844ef014a5c2001af60a2c28, regionState=OPEN, openSeqNum=130, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:52,242 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,244 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41945 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=8fc3ff0a63e6,45737,1732250255970, table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-22T04:37:52,245 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-22T04:37:52,245 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
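The flush-policy line above falls back to flushing all column families of hbase:meta because none of them individually exceeded the size threshold. A minimal sketch of that selection rule, assuming a simplified store model and an illustrative 16 MB lower bound; the family names and sizes below are also illustrative, not taken from the cluster:

    import java.util.ArrayList;
    import java.util.LinkedHashMap;
    import java.util.List;
    import java.util.Map;

    // Simplified selection rule: flush only the families above the lower bound,
    // but if none qualifies, flush them all (the case reported in the log line above).
    public class FlushPolicySketch {
        static List<String> storesToFlush(Map<String, Long> familySizes, long flushSizeLowerBound) {
            List<String> selected = new ArrayList<>();
            for (Map.Entry<String, Long> e : familySizes.entrySet()) {
                if (e.getValue() >= flushSizeLowerBound) {
                    selected.add(e.getKey());
                }
            }
            return selected.isEmpty() ? new ArrayList<>(familySizes.keySet()) : selected;
        }

        public static void main(String[] args) {
            Map<String, Long> sizes = new LinkedHashMap<>();
            sizes.put("info", 5_030L);
            sizes.put("ns", 100L);
            sizes.put("rep_barrier", 0L);
            sizes.put("table", 100L);
            // None above the assumed 16 MB bound, so every family is flushed.
            System.out.println(storesToFlush(sizes, 16L * 1024 * 1024));
        }
    }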
2024-11-22T04:37:52,245 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-22T04:37:52,245 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=14, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 46810a06844ef014a5c2001af60a2c28, server=8fc3ff0a63e6,45737,1732250255970 because future has completed 2024-11-22T04:37:52,245 INFO [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,246 INFO [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 243e7a44d431d77d0a3c663b5e11c6ce columnFamilyName info 2024-11-22T04:37:52,246 DEBUG [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:37:52,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=11 2024-11-22T04:37:52,253 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 46810a06844ef014a5c2001af60a2c28, server=8fc3ff0a63e6,45737,1732250255970 in 239 msec 2024-11-22T04:37:52,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=46810a06844ef014a5c2001af60a2c28, ASSIGN in 406 msec 2024-11-22T04:37:52,264 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 46810a06844ef014a5c2001af60a2c28#info#compaction#66 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:37:52,264 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/.tmp/info/98f0f78d193a42b18a5269f741bcd0f2 is 1080, key is row0001/info:/1732250267245/Put/seqid=0 2024-11-22T04:37:52,273 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/1ca766ab4f3f4bbea0c90b43cc6df4c5 is 193, key is TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce./info:regioninfo/1732250272001/Put/seqid=0 2024-11-22T04:37:52,274 DEBUG [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-top 2024-11-22T04:37:52,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741852_1028 (size=70862) 2024-11-22T04:37:52,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741852_1028 (size=70862) 2024-11-22T04:37:52,279 DEBUG [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993 2024-11-22T04:37:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741853_1029 (size=9847) 2024-11-22T04:37:52,283 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/.tmp/info/98f0f78d193a42b18a5269f741bcd0f2 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/98f0f78d193a42b18a5269f741bcd0f2 2024-11-22T04:37:52,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741853_1029 (size=9847) 2024-11-22T04:37:52,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/1ca766ab4f3f4bbea0c90b43cc6df4c5 2024-11-22T04:37:52,287 DEBUG [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863 2024-11-22T04:37:52,290 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 46810a06844ef014a5c2001af60a2c28/info of 46810a06844ef014a5c2001af60a2c28 into 98f0f78d193a42b18a5269f741bcd0f2(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:37:52,290 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 46810a06844ef014a5c2001af60a2c28: 2024-11-22T04:37:52,290 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28., storeName=46810a06844ef014a5c2001af60a2c28/info, priority=15, startTime=1732250272238; duration=0sec 2024-11-22T04:37:52,290 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:52,290 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 46810a06844ef014a5c2001af60a2c28:info 2024-11-22T04:37:52,293 DEBUG [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813 2024-11-22T04:37:52,293 INFO [StoreOpener-243e7a44d431d77d0a3c663b5e11c6ce-1 {}] regionserver.HStore(327): Store=243e7a44d431d77d0a3c663b5e11c6ce/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:37:52,294 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,294 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,296 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,296 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,296 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,298 DEBUG 
[RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,299 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 243e7a44d431d77d0a3c663b5e11c6ce; next sequenceid=130; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=789754, jitterRate=0.0042243897914886475}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-22T04:37:52,299 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:37:52,299 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 243e7a44d431d77d0a3c663b5e11c6ce: Running coprocessor pre-open hook at 1732250272242Writing region info on filesystem at 1732250272242Initializing all the Stores at 1732250272245 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250272245Cleaning up temporary data from old regions at 1732250272296 (+51 ms)Running coprocessor post-open hooks at 1732250272299 (+3 ms)Region opened successfully at 1732250272299 2024-11-22T04:37:52,300 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., pid=13, masterSystemTime=1732250272159 2024-11-22T04:37:52,300 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:37:52,300 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:52,300 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 4 store files, 0 compacting, 4 eligible, 16 blocking 2024-11-22T04:37:52,302 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:37:52,302 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:37:52,302 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 
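(Editorial aside.) The CompactionConfiguration entry above reports the selection parameters in effect for this store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2), and the SortedCompactionPolicy lines show all four store files of the freshly split daughter region being picked. As a rough illustration only — the class and method below are hypothetical and this is not HBase's ExploringCompactionPolicy — a ratio-based selection accepts a candidate set when no single file dwarfs the rest:

```java
// Illustrative sketch, not HBase code: a simplified ratio-based selection check
// using the parameters logged above (minFilesToCompact=3, maxFilesToCompact=10, ratio=1.2).
import java.util.List;

public class RatioSelectionSketch {

    /**
     * A candidate list is acceptable when it has enough files and every file is
     * "close enough" in size to the others: size(f) <= ratio * sum(sizes of the rest).
     */
    static boolean acceptable(List<Long> sizes, int minFiles, int maxFiles, double ratio) {
        if (sizes.size() < minFiles || sizes.size() > maxFiles) {
            return false;
        }
        long total = sizes.stream().mapToLong(Long::longValue).sum();
        for (long size : sizes) {
            if (size > ratio * (total - size)) {
                return false; // one file dominates the selection
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // Roughly the four file sizes (in bytes) reported for 243e7a...'s info store below.
        List<Long> candidate = List.of(73_600L, 17_500L, 18_500L, 11_200L);
        System.out.println(acceptable(candidate, 3, 10, 1.2)); // false: the 73.6 K reference file dominates
    }
}
```

Run on the four sizes reported in the Compactor entries below (73.6 K, 17.5 K, 18.5 K, 11.2 K), the check fails because the reference file carried over from the parent region dominates the other three; in this run the compaction is requested anyway as part of opening the freshly split daughter region, as the "Small Compaction requested: ... Because: Opening Region" and priority-override entries above indicate.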
2024-11-22T04:37:52,302 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-top, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=120.8 K 2024-11-22T04:37:52,302 DEBUG [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:37:52,302 INFO [RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 
2024-11-22T04:37:52,303 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488, keycount=32, bloomtype=ROW, size=73.6 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732250267245 2024-11-22T04:37:52,303 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=12 updating hbase:meta row=243e7a44d431d77d0a3c663b5e11c6ce, regionState=OPEN, openSeqNum=130, regionLocation=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:52,303 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732250271396 2024-11-22T04:37:52,304 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863, keycount=13, bloomtype=ROW, size=18.5 K, encoding=NONE, compression=NONE, seqNum=116, earliestPutTs=1732250271424 2024-11-22T04:37:52,304 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813, keycount=6, bloomtype=ROW, size=11.2 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732250271455 2024-11-22T04:37:52,305 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=12, state=RUNNABLE, hasLock=false; OpenRegionProcedure 243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 because future has completed 2024-11-22T04:37:52,307 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/ns/20a86e436dcf4bc792d5b4d61636147f is 43, key is default/ns:d/1732250257078/Put/seqid=0 2024-11-22T04:37:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741854_1030 (size=5153) 2024-11-22T04:37:52,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741854_1030 (size=5153) 2024-11-22T04:37:52,313 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/ns/20a86e436dcf4bc792d5b4d61636147f 2024-11-22T04:37:52,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-22T04:37:52,317 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; OpenRegionProcedure 243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 in 302 msec 2024-11-22T04:37:52,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=7 2024-11-22T04:37:52,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=243e7a44d431d77d0a3c663b5e11c6ce, ASSIGN in 470 msec 2024-11-22T04:37:52,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=46810a06844ef014a5c2001af60a2c28, daughterB=243e7a44d431d77d0a3c663b5e11c6ce in 847 msec 2024-11-22T04:37:52,323 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 2024-11-22T04:37:52,323 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 2024-11-22T04:37:52,323 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 2024-11-22T04:37:52,324 INFO [PEWorker-1 {}] assignment.SplitTableRegionProcedure(534): Split of {ENCODED => 7187ba4ff0ce5bd266e84d41e7ea2488, NAME => 'TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488.', STARTKEY => '', ENDKEY => ''} skipped; state is already SPLIT 2024-11-22T04:37:52,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=7187ba4ff0ce5bd266e84d41e7ea2488, daughterA=8b05461836eb98b4b44045b2379b3c80, daughterB=4c84ef5bf32840021245856b55f66595 in 844 msec 2024-11-22T04:37:52,337 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#69 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:37:52,338 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/1e9a4e46a1e443f588570effe3702b8d is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:37:52,345 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/table/6a2f970297354d02a81cada4a12a0248 is 65, key is TestLogRolling-testLogRolling/table:state/1732250257580/Put/seqid=0 2024-11-22T04:37:52,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741855_1031 (size=43081) 2024-11-22T04:37:52,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741855_1031 (size=43081) 2024-11-22T04:37:52,357 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/1e9a4e46a1e443f588570effe3702b8d as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/1e9a4e46a1e443f588570effe3702b8d 2024-11-22T04:37:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741856_1032 (size=5340) 2024-11-22T04:37:52,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741856_1032 (size=5340) 2024-11-22T04:37:52,360 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/table/6a2f970297354d02a81cada4a12a0248 2024-11-22T04:37:52,366 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 4 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into 1e9a4e46a1e443f588570effe3702b8d(size=42.1 K), total size for store is 42.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
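(Editorial aside.) The throughput figures in the compaction entries above (roughly 18–21 MB/s against a 50.00 MB/s total limit, with "slept 0 time(s)") come from pressure-aware write throttling during compaction. As a minimal sketch only — names are hypothetical and this is not the actual PressureAwareThroughputController — the idea is a running-average throttle that sleeps whenever a compaction writes faster than its allowed rate:

```java
// Illustrative sketch, not HBase code: sleep whenever bytes are written faster
// than a configured limit, so the running average stays under the cap.
public class ThroughputThrottleSketch {
    private final double maxBytesPerSecond; // e.g. 50 MB/s in the run above
    private final long startNanos = System.nanoTime();
    private long totalBytes;

    public ThroughputThrottleSketch(double maxBytesPerSecond) {
        this.maxBytesPerSecond = maxBytesPerSecond;
    }

    /** Record a written chunk and sleep off any time by which we are ahead of the limit. */
    public void control(long bytesWritten) throws InterruptedException {
        totalBytes += bytesWritten;
        double elapsedSeconds = (System.nanoTime() - startNanos) / 1e9;
        double minimumSeconds = totalBytes / maxBytesPerSecond; // time the limit allows
        if (minimumSeconds > elapsedSeconds) {
            Thread.sleep((long) ((minimumSeconds - elapsedSeconds) * 1000));
        }
    }

    public double averageThroughputMBps() {
        double elapsedSeconds = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
        return totalBytes / elapsedSeconds / (1024 * 1024);
    }

    public static void main(String[] args) throws InterruptedException {
        ThroughputThrottleSketch throttle = new ThroughputThrottleSketch(50 * 1024 * 1024);
        throttle.control(1024 * 1024); // one 1 MB chunk: well under the limit, so no sleep
        System.out.printf("average throughput is %.2f MB/second%n", throttle.averageThroughputMBps());
    }
}
```

With the limit well above the observed write rate, such a throttle never sleeps, which matches the "slept 0 time(s) and total slept time is 0 ms" messages in this run.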
2024-11-22T04:37:52,366 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:37:52,366 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=12, startTime=1732250272300; duration=0sec 2024-11-22T04:37:52,366 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:37:52,366 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:37:52,367 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/1ca766ab4f3f4bbea0c90b43cc6df4c5 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/info/1ca766ab4f3f4bbea0c90b43cc6df4c5 2024-11-22T04:37:52,376 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/info/1ca766ab4f3f4bbea0c90b43cc6df4c5, entries=30, sequenceid=17, filesize=9.6 K 2024-11-22T04:37:52,377 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/ns/20a86e436dcf4bc792d5b4d61636147f as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/ns/20a86e436dcf4bc792d5b4d61636147f 2024-11-22T04:37:52,384 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/ns/20a86e436dcf4bc792d5b4d61636147f, entries=2, sequenceid=17, filesize=5.0 K 2024-11-22T04:37:52,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/table/6a2f970297354d02a81cada4a12a0248 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/table/6a2f970297354d02a81cada4a12a0248 2024-11-22T04:37:52,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/table/6a2f970297354d02a81cada4a12a0248, entries=2, sequenceid=17, filesize=5.2 K 2024-11-22T04:37:52,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 147ms, sequenceid=17, compaction requested=false 2024-11-22T04:37:52,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T04:37:52,945 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:52,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:53,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:42978 deadline: 1732250283470, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. is not online on 8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:37:53,500 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. is not online on 8fc3ff0a63e6,45737,1732250255970 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T04:37:53,500 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488. 
is not online on 8fc3ff0a63e6,45737,1732250255970 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T04:37:53,500 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732250257208.7187ba4ff0ce5bd266e84d41e7ea2488., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=2 from cache 2024-11-22T04:37:53,946 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:53,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:54,947 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:54,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:37:55,948 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:55,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,698 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,699 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,720 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,726 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:56,949 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:56,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:57,235 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-22T04:37:57,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,239 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,269 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,274 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,277 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-22T04:37:57,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:57,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:58,950 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:58,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:59,951 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:37:59,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:00,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:00,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:01,952 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:01,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:02,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:02,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:03,602 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=130] 2024-11-22T04:38:03,613 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:03,613 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:38:03,617 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/06885fa7ca78463fb1f34af049843122 is 1080, key is row0097/info:/1732250283603/Put/seqid=0 2024-11-22T04:38:03,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741857_1033 (size=12516) 2024-11-22T04:38:03,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741857_1033 (size=12516) 2024-11-22T04:38:03,622 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=140 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/06885fa7ca78463fb1f34af049843122 2024-11-22T04:38:03,629 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/06885fa7ca78463fb1f34af049843122 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122 2024-11-22T04:38:03,635 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122, entries=7, sequenceid=140, filesize=12.2 K 2024-11-22T04:38:03,635 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 243e7a44d431d77d0a3c663b5e11c6ce in 22ms, sequenceid=140, compaction requested=false 2024-11-22T04:38:03,635 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:03,636 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:03,636 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T04:38:03,639 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/c1713887bfe54730aaff6e7fb1cef7d9 is 1080, key is row0104/info:/1732250283614/Put/seqid=0 2024-11-22T04:38:03,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741858_1034 (size=17906) 2024-11-22T04:38:03,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741858_1034 (size=17906) 2024-11-22T04:38:03,648 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=155 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/c1713887bfe54730aaff6e7fb1cef7d9 2024-11-22T04:38:03,654 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/c1713887bfe54730aaff6e7fb1cef7d9 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9 2024-11-22T04:38:03,659 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9, entries=12, sequenceid=155, filesize=17.5 K 2024-11-22T04:38:03,660 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=13.66 KB/13988 for 243e7a44d431d77d0a3c663b5e11c6ce in 24ms, sequenceid=155, compaction requested=true 2024-11-22T04:38:03,660 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:03,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:03,660 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:03,660 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:03,661 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 73503 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:38:03,661 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:03,661 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in 
TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:03,661 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/1e9a4e46a1e443f588570effe3702b8d, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=71.8 K 2024-11-22T04:38:03,661 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1e9a4e46a1e443f588570effe3702b8d, keycount=35, bloomtype=ROW, size=42.1 K, encoding=NONE, compression=NONE, seqNum=126, earliestPutTs=1732250269386 2024-11-22T04:38:03,662 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 06885fa7ca78463fb1f34af049843122, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=140, earliestPutTs=1732250283603 2024-11-22T04:38:03,662 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting c1713887bfe54730aaff6e7fb1cef7d9, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732250283614 2024-11-22T04:38:03,671 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#73 average throughput is 27.71 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:03,671 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/bd5013fbbd6a43e4b9c875ca4be42c32 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:03,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741859_1035 (size=63733) 2024-11-22T04:38:03,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741859_1035 (size=63733) 2024-11-22T04:38:03,682 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/bd5013fbbd6a43e4b9c875ca4be42c32 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bd5013fbbd6a43e4b9c875ca4be42c32 2024-11-22T04:38:03,687 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into bd5013fbbd6a43e4b9c875ca4be42c32(size=62.2 K), total size for store is 62.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:38:03,687 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:03,687 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250283660; duration=0sec 2024-11-22T04:38:03,687 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:03,687 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:03,953 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:03,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:04,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:04,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:05,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:05,662 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-22T04:38:05,667 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ed94a14c6d4b44c5a9b0faf4cd62981b is 1080, key is row0116/info:/1732250283637/Put/seqid=0 2024-11-22T04:38:05,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741860_1036 (size=20078) 2024-11-22T04:38:05,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741860_1036 (size=20078) 2024-11-22T04:38:05,673 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=173 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ed94a14c6d4b44c5a9b0faf4cd62981b 2024-11-22T04:38:05,679 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ed94a14c6d4b44c5a9b0faf4cd62981b as 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b 2024-11-22T04:38:05,685 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b, entries=14, sequenceid=173, filesize=19.6 K 2024-11-22T04:38:05,686 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=10.51 KB/10760 for 243e7a44d431d77d0a3c663b5e11c6ce in 24ms, sequenceid=173, compaction requested=false 2024-11-22T04:38:05,686 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:05,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:05,687 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-22T04:38:05,692 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/acc2f93457f74618be3946315f5366f0 is 1080, key is row0130/info:/1732250285664/Put/seqid=0 2024-11-22T04:38:05,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741861_1037 (size=16828) 2024-11-22T04:38:05,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741861_1037 (size=16828) 2024-11-22T04:38:05,698 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=187 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/acc2f93457f74618be3946315f5366f0 2024-11-22T04:38:05,698 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-22T04:38:05,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/acc2f93457f74618be3946315f5366f0 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0 2024-11-22T04:38:05,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0, entries=11, sequenceid=187, filesize=16.4 K 2024-11-22T04:38:05,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 243e7a44d431d77d0a3c663b5e11c6ce in 23ms, sequenceid=187, compaction requested=true 2024-11-22T04:38:05,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:05,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:05,710 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:05,710 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:05,712 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 100639 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:38:05,712 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:05,712 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 
2024-11-22T04:38:05,712 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bd5013fbbd6a43e4b9c875ca4be42c32, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=98.3 K 2024-11-22T04:38:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:05,713 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T04:38:05,714 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting bd5013fbbd6a43e4b9c875ca4be42c32, keycount=54, bloomtype=ROW, size=62.2 K, encoding=NONE, compression=NONE, seqNum=155, earliestPutTs=1732250269386 2024-11-22T04:38:05,715 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting ed94a14c6d4b44c5a9b0faf4cd62981b, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=173, earliestPutTs=1732250283637 2024-11-22T04:38:05,715 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting acc2f93457f74618be3946315f5366f0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732250285664 2024-11-22T04:38:05,719 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8272d2952090453bba22c6d25ca16263 is 1080, key is row0141/info:/1732250285688/Put/seqid=0 2024-11-22T04:38:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741862_1038 (size=17906) 2024-11-22T04:38:05,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741862_1038 (size=17906) 2024-11-22T04:38:05,733 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8272d2952090453bba22c6d25ca16263 2024-11-22T04:38:05,735 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#77 average throughput is 40.53 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:05,736 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/224c826e22e7421499e4a8d070f0bb87 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:05,740 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8272d2952090453bba22c6d25ca16263 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263 2024-11-22T04:38:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741863_1039 (size=90862) 2024-11-22T04:38:05,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741863_1039 (size=90862) 2024-11-22T04:38:05,745 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263, entries=12, sequenceid=202, filesize=17.5 K 2024-11-22T04:38:05,746 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/224c826e22e7421499e4a8d070f0bb87 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/224c826e22e7421499e4a8d070f0bb87 2024-11-22T04:38:05,746 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=8.41 KB/8608 for 243e7a44d431d77d0a3c663b5e11c6ce in 33ms, sequenceid=202, compaction requested=false 2024-11-22T04:38:05,746 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:05,751 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into 224c826e22e7421499e4a8d070f0bb87(size=88.7 K), total size for store is 106.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-22T04:38:05,751 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:05,752 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250285710; duration=0sec 2024-11-22T04:38:05,752 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:05,752 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:05,954 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:05,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:06,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:06,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:07,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:07,734 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-22T04:38:07,739 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/b0385268549c4d5a93e9825c4635c9fa is 1080, key is row0153/info:/1732250285715/Put/seqid=0 2024-11-22T04:38:07,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741864_1040 (size=14672) 2024-11-22T04:38:07,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741864_1040 (size=14672) 2024-11-22T04:38:07,746 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=215 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/b0385268549c4d5a93e9825c4635c9fa 2024-11-22T04:38:07,751 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/b0385268549c4d5a93e9825c4635c9fa as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa 2024-11-22T04:38:07,757 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa, entries=9, sequenceid=215, filesize=14.3 K 2024-11-22T04:38:07,758 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=11.56 KB/11836 for 243e7a44d431d77d0a3c663b5e11c6ce in 24ms, sequenceid=215, compaction requested=true 2024-11-22T04:38:07,758 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:07,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:07,758 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:07,758 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:07,759 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123440 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-22T04:38:07,759 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:07,759 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:07,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:07,760 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/224c826e22e7421499e4a8d070f0bb87, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=120.5 K 2024-11-22T04:38:07,760 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-22T04:38:07,760 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 224c826e22e7421499e4a8d070f0bb87, keycount=79, bloomtype=ROW, size=88.7 K, encoding=NONE, compression=NONE, seqNum=187, earliestPutTs=1732250269386 2024-11-22T04:38:07,761 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8272d2952090453bba22c6d25ca16263, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=202, earliestPutTs=1732250285688 2024-11-22T04:38:07,761 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting b0385268549c4d5a93e9825c4635c9fa, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732250285715 2024-11-22T04:38:07,764 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/a894688b85134e7998e4afdcce502d71 is 1080, key is row0162/info:/1732250287736/Put/seqid=0 2024-11-22T04:38:07,788 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#80 average throughput is 34.21 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:07,788 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/a61ddd0303624ed2b69fe88c726a23b5 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741865_1041 (size=17906) 2024-11-22T04:38:07,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741865_1041 (size=17906) 2024-11-22T04:38:07,792 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=230 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/a894688b85134e7998e4afdcce502d71 2024-11-22T04:38:07,799 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/a894688b85134e7998e4afdcce502d71 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71 2024-11-22T04:38:07,804 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71, entries=12, sequenceid=230, filesize=17.5 K 2024-11-22T04:38:07,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=17.86 KB/18292 for 243e7a44d431d77d0a3c663b5e11c6ce in 45ms, sequenceid=230, compaction requested=false 2024-11-22T04:38:07,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:07,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741866_1042 (size=113606) 2024-11-22T04:38:07,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741866_1042 (size=113606) 2024-11-22T04:38:07,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:07,808 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-22T04:38:07,812 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8f25851308a846b2848baaac56004471 is 1080, key is row0174/info:/1732250287761/Put/seqid=0 2024-11-22T04:38:07,813 DEBUG 
[RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/a61ddd0303624ed2b69fe88c726a23b5 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a61ddd0303624ed2b69fe88c726a23b5 2024-11-22T04:38:07,819 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into a61ddd0303624ed2b69fe88c726a23b5(size=110.9 K), total size for store is 128.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:38:07,819 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:07,820 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250287758; duration=0sec 2024-11-22T04:38:07,820 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:07,820 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741867_1043 (size=24394) 2024-11-22T04:38:07,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741867_1043 (size=24394) 2024-11-22T04:38:07,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8f25851308a846b2848baaac56004471 2024-11-22T04:38:07,836 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/8f25851308a846b2848baaac56004471 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471 2024-11-22T04:38:07,844 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471, entries=18, sequenceid=251, filesize=23.8 K 2024-11-22T04:38:07,845 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=1.05 KB/1076 for 
243e7a44d431d77d0a3c663b5e11c6ce in 37ms, sequenceid=251, compaction requested=true 2024-11-22T04:38:07,846 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:07,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:07,846 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:07,846 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:07,847 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 155906 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:38:07,847 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:07,847 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:07,847 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a61ddd0303624ed2b69fe88c726a23b5, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=152.3 K 2024-11-22T04:38:07,847 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting a61ddd0303624ed2b69fe88c726a23b5, keycount=100, bloomtype=ROW, size=110.9 K, encoding=NONE, compression=NONE, seqNum=215, earliestPutTs=1732250269386 2024-11-22T04:38:07,848 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting a894688b85134e7998e4afdcce502d71, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=230, earliestPutTs=1732250287736 2024-11-22T04:38:07,848 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8f25851308a846b2848baaac56004471, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732250287761 2024-11-22T04:38:07,860 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#82 average throughput is 44.47 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:07,861 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/6a628a0a7fc54cb1b2d762a9731939d3 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:07,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741868_1044 (size=146253) 2024-11-22T04:38:07,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741868_1044 (size=146253) 2024-11-22T04:38:07,873 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/6a628a0a7fc54cb1b2d762a9731939d3 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/6a628a0a7fc54cb1b2d762a9731939d3 2024-11-22T04:38:07,880 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into 6a628a0a7fc54cb1b2d762a9731939d3(size=142.8 K), total size for store is 142.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:38:07,880 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:07,881 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250287846; duration=0sec 2024-11-22T04:38:07,881 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:07,881 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:07,955 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:07,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:08,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:08,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:09,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:09,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:38:09,829 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/bafb9277cf304d1b8f4e11ca861eee8c is 1080, key is row0192/info:/1732250287809/Put/seqid=0 2024-11-22T04:38:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741869_1045 (size=12522) 2024-11-22T04:38:09,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741869_1045 (size=12522) 2024-11-22T04:38:09,871 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:3.0.0-beta-2-SNAPSHOT] 2024-11-22T04:38:09,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] ipc.CallRunner(138): callId: 229 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:42978 deadline: 1732250299871, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:38:09,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=130 , the old value is region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=130, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T04:38:09,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=130 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=243e7a44d431d77d0a3c663b5e11c6ce, server=8fc3ff0a63e6,45737,1732250255970 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at 
org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-22T04:38:09,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., hostname=8fc3ff0a63e6,45737,1732250255970, seqNum=130 because the exception is null or not the one we care about 2024-11-22T04:38:09,956 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:09,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:10,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/bafb9277cf304d1b8f4e11ca861eee8c 2024-11-22T04:38:10,241 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/bafb9277cf304d1b8f4e11ca861eee8c as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c 2024-11-22T04:38:10,245 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c, entries=7, sequenceid=263, filesize=12.2 K 2024-11-22T04:38:10,246 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 243e7a44d431d77d0a3c663b5e11c6ce in 421ms, sequenceid=263, compaction requested=false 2024-11-22T04:38:10,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:10,957 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:10,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:11,958 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:11,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:12,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:12,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:13,959 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:13,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:14,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:14,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:15,960 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:15,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:16,961 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:16,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:17,096 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-22T04:38:17,096 INFO [master/8fc3ff0a63e6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-22T04:38:17,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:17,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:18,962 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:18,966 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:19,963 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:19,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:19,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:19,969 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-22T04:38:19,976 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ac439430aaad473d855f19a08d8712b8 is 1080, key is row0199/info:/1732250289826/Put/seqid=0 2024-11-22T04:38:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741870_1046 (size=29807) 2024-11-22T04:38:19,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741870_1046 (size=29807) 2024-11-22T04:38:19,982 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=289 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ac439430aaad473d855f19a08d8712b8 2024-11-22T04:38:19,987 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/ac439430aaad473d855f19a08d8712b8 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8 2024-11-22T04:38:19,991 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8, entries=23, sequenceid=289, filesize=29.1 K 2024-11-22T04:38:19,992 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=3.15 KB/3228 for 243e7a44d431d77d0a3c663b5e11c6ce in 23ms, sequenceid=289, compaction requested=true 2024-11-22T04:38:19,992 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:19,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:19,992 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:19,992 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:19,993 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 188582 starting at candidate #0 after 
considering 1 permutations with 1 in ratio 2024-11-22T04:38:19,993 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:19,994 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:19,994 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/6a628a0a7fc54cb1b2d762a9731939d3, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=184.2 K 2024-11-22T04:38:19,994 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting 6a628a0a7fc54cb1b2d762a9731939d3, keycount=130, bloomtype=ROW, size=142.8 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732250269386 2024-11-22T04:38:19,994 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting bafb9277cf304d1b8f4e11ca861eee8c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732250287809 2024-11-22T04:38:19,995 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] compactions.Compactor(225): Compacting ac439430aaad473d855f19a08d8712b8, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732250289826 2024-11-22T04:38:20,005 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#85 average throughput is 54.73 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:20,006 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/2a02ce6ad17249b18131741c6f8e85a8 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741871_1047 (size=178732) 2024-11-22T04:38:20,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741871_1047 (size=178732) 2024-11-22T04:38:20,014 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/2a02ce6ad17249b18131741c6f8e85a8 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2a02ce6ad17249b18131741c6f8e85a8 2024-11-22T04:38:20,019 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into 2a02ce6ad17249b18131741c6f8e85a8(size=174.5 K), total size for store is 174.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:38:20,019 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:20,019 INFO [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250299992; duration=0sec 2024-11-22T04:38:20,019 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:20,019 DEBUG [RS:0;8fc3ff0a63e6:45737-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:20,964 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:20,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:21,965 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:21,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:21,994 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:21,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-22T04:38:21,999 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/2f34090198d248d0a0e772daefca977c is 1080, key is row0222/info:/1732250299971/Put/seqid=0 2024-11-22T04:38:22,010 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-22T04:38:22,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741872_1048 (size=12523) 2024-11-22T04:38:22,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741872_1048 (size=12523) 2024-11-22T04:38:22,014 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=300 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/2f34090198d248d0a0e772daefca977c 2024-11-22T04:38:22,019 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/2f34090198d248d0a0e772daefca977c as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c 2024-11-22T04:38:22,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c, entries=7, sequenceid=300, filesize=12.2 K 2024-11-22T04:38:22,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 243e7a44d431d77d0a3c663b5e11c6ce in 31ms, sequenceid=300, compaction requested=false 2024-11-22T04:38:22,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:22,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45737 {}] regionserver.HRegion(8855): Flush requested on 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:22,027 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-22T04:38:22,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/aef27304be6f48df9d1ae198fcd7b674 is 1080, key is row0229/info:/1732250301995/Put/seqid=0 2024-11-22T04:38:22,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741873_1049 (size=24412) 2024-11-22T04:38:22,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741873_1049 (size=24412) 2024-11-22T04:38:22,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=321 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/aef27304be6f48df9d1ae198fcd7b674 2024-11-22T04:38:22,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/aef27304be6f48df9d1ae198fcd7b674 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674 2024-11-22T04:38:22,045 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674, entries=18, sequenceid=321, filesize=23.8 K 2024-11-22T04:38:22,046 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 
KB/19368, heapSize ~20.48 KB/20976, currentSize=10.51 KB/10760 for 243e7a44d431d77d0a3c663b5e11c6ce in 19ms, sequenceid=321, compaction requested=true 2024-11-22T04:38:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 243e7a44d431d77d0a3c663b5e11c6ce:info, priority=-2147483648, current under compaction store size is 1 2024-11-22T04:38:22,046 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:22,046 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-22T04:38:22,047 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 215667 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-22T04:38:22,047 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1541): 243e7a44d431d77d0a3c663b5e11c6ce/info is initiating minor compaction (all files) 2024-11-22T04:38:22,047 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 243e7a44d431d77d0a3c663b5e11c6ce/info in TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:22,048 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2a02ce6ad17249b18131741c6f8e85a8, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674] into tmpdir=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp, totalSize=210.6 K 2024-11-22T04:38:22,048 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2a02ce6ad17249b18131741c6f8e85a8, keycount=160, bloomtype=ROW, size=174.5 K, encoding=NONE, compression=NONE, seqNum=289, earliestPutTs=1732250269386 2024-11-22T04:38:22,048 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f34090198d248d0a0e772daefca977c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=300, earliestPutTs=1732250299971 2024-11-22T04:38:22,048 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] compactions.Compactor(225): Compacting aef27304be6f48df9d1ae198fcd7b674, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=321, earliestPutTs=1732250301995 2024-11-22T04:38:22,059 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 243e7a44d431d77d0a3c663b5e11c6ce#info#compaction#88 average throughput is 63.28 MB/second, 
slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-22T04:38:22,059 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/512cf2ecc80240d6a228f857d97d1537 is 1080, key is row0062/info:/1732250269386/Put/seqid=0 2024-11-22T04:38:22,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741874_1050 (size=205886) 2024-11-22T04:38:22,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741874_1050 (size=205886) 2024-11-22T04:38:22,103 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/512cf2ecc80240d6a228f857d97d1537 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/512cf2ecc80240d6a228f857d97d1537 2024-11-22T04:38:22,108 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 243e7a44d431d77d0a3c663b5e11c6ce/info of 243e7a44d431d77d0a3c663b5e11c6ce into 512cf2ecc80240d6a228f857d97d1537(size=201.1 K), total size for store is 201.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-22T04:38:22,108 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:22,108 INFO [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., storeName=243e7a44d431d77d0a3c663b5e11c6ce/info, priority=13, startTime=1732250302046; duration=0sec 2024-11-22T04:38:22,108 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-22T04:38:22,108 DEBUG [RS:0;8fc3ff0a63e6:45737-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 243e7a44d431d77d0a3c663b5e11c6ce:info 2024-11-22T04:38:22,967 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:22,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:23,968 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:23,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:24,045 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-22T04:38:24,045 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C45737%2C1732250255970.1732250304045 2024-11-22T04:38:24,058 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,059 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,059 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,059 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,059 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,059 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250256594 with entries=312, filesize=308.52 KB; new WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250304045 2024-11-22T04:38:24,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741833_1009 (size=315930) 2024-11-22T04:38:24,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741833_1009 (size=315930) 2024-11-22T04:38:24,065 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36505:36505),(127.0.0.1/127.0.0.1:41503:41503)] 2024-11-22T04:38:24,067 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 46810a06844ef014a5c2001af60a2c28: 2024-11-22T04:38:24,067 INFO [Time-limited 
test {}] regionserver.HRegion(2902): Flushing 243e7a44d431d77d0a3c663b5e11c6ce 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-22T04:38:24,071 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/40c77f6a82d0433ca43d5eb81b3f15a0 is 1080, key is row0247/info:/1732250302027/Put/seqid=0 2024-11-22T04:38:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741876_1052 (size=15760) 2024-11-22T04:38:24,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741876_1052 (size=15760) 2024-11-22T04:38:24,079 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=335 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/40c77f6a82d0433ca43d5eb81b3f15a0 2024-11-22T04:38:24,084 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/.tmp/info/40c77f6a82d0433ca43d5eb81b3f15a0 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/40c77f6a82d0433ca43d5eb81b3f15a0 2024-11-22T04:38:24,089 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/40c77f6a82d0433ca43d5eb81b3f15a0, entries=10, sequenceid=335, filesize=15.4 K 2024-11-22T04:38:24,090 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 243e7a44d431d77d0a3c663b5e11c6ce in 23ms, sequenceid=335, compaction requested=false 2024-11-22T04:38:24,090 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 243e7a44d431d77d0a3c663b5e11c6ce: 2024-11-22T04:38:24,090 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-22T04:38:24,094 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/77472a6f7a6c411b83b7e90b6d6cee26 is 193, key is TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce./info:regioninfo/1732250272303/Put/seqid=0 2024-11-22T04:38:24,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741877_1053 (size=6223) 2024-11-22T04:38:24,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741877_1053 (size=6223) 2024-11-22T04:38:24,101 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/77472a6f7a6c411b83b7e90b6d6cee26 2024-11-22T04:38:24,105 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/.tmp/info/77472a6f7a6c411b83b7e90b6d6cee26 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/info/77472a6f7a6c411b83b7e90b6d6cee26 2024-11-22T04:38:24,109 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/info/77472a6f7a6c411b83b7e90b6d6cee26, entries=5, sequenceid=21, filesize=6.1 K 2024-11-22T04:38:24,110 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 20ms, sequenceid=21, compaction requested=false 2024-11-22T04:38:24,110 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-22T04:38:24,111 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C45737%2C1732250255970.1732250304111 2024-11-22T04:38:24,117 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,117 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,117 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,117 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,117 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,117 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250304045 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250304111 2024-11-22T04:38:24,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741875_1051 (size=731) 2024-11-22T04:38:24,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741875_1051 (size=731) 2024-11-22T04:38:24,120 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41503:41503),(127.0.0.1/127.0.0.1:36505:36505)] 2024-11-22T04:38:24,121 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-22T04:38:24,121 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:38:24,121 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:38:24,122 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:24,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:24,122 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:24,122 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T04:38:24,122 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:38:24,122 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1664057984, stopped=false 2024-11-22T04:38:24,122 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,41945,1732250255718 2024-11-22T04:38:24,122 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250256594 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs/8fc3ff0a63e6%2C45737%2C1732250255970.1732250256594 2024-11-22T04:38:24,123 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/WALs/8fc3ff0a63e6,45737,1732250255970/8fc3ff0a63e6%2C45737%2C1732250255970.1732250304045 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs/8fc3ff0a63e6%2C45737%2C1732250255970.1732250304045 2024-11-22T04:38:24,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:24,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:24,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:24,202 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:24,202 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:38:24,202 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:38:24,202 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:24,202 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:24,202 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:38:24,203 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:38:24,203 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,45737,1732250255970' ***** 2024-11-22T04:38:24,203 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:38:24,203 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:38:24,203 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:38:24,203 INFO [RS:0;8fc3ff0a63e6:45737 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:38:24,203 INFO [RS:0;8fc3ff0a63e6:45737 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:38:24,203 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(3091): Received CLOSE for 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(3091): Received CLOSE for 243e7a44d431d77d0a3c663b5e11c6ce 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:38:24,204 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 46810a06844ef014a5c2001af60a2c28, disabling compactions & flushes 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:38:24,204 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:38:24,204 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:45737. 2024-11-22T04:38:24,204 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. after waiting 0 ms 2024-11-22T04:38:24,204 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 
2024-11-22T04:38:24,204 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:24,204 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-22T04:38:24,204 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-22T04:38:24,204 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1325): Online Regions={46810a06844ef014a5c2001af60a2c28=TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28., 243e7a44d431d77d0a3c663b5e11c6ce=TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce., 1588230740=hbase:meta,,1.1588230740} 2024-11-22T04:38:24,204 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:38:24,204 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 243e7a44d431d77d0a3c663b5e11c6ce, 46810a06844ef014a5c2001af60a2c28 2024-11-22T04:38:24,205 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:38:24,205 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:38:24,204 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-bottom] to archive 2024-11-22T04:38:24,205 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:38:24,205 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:38:24,206 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-22T04:38:24,208 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:38:24,208 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=8fc3ff0a63e6:41945 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] ... 16 more 2024-11-22T04:38:24,209 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-22T04:38:24,210 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-22T04:38:24,211 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-22T04:38:24,211 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:38:24,211 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250304204Running coprocessor pre-close hooks at 1732250304204Disabling compacts and flushes for region at 1732250304204Disabling writes for close at 1732250304205 (+1 ms)Writing region close event to WAL at 1732250304206 (+1 ms)Running coprocessor post-close hooks at 1732250304211 (+5 ms)Closed at 1732250304211 2024-11-22T04:38:24,211 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-22T04:38:24,213 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/46810a06844ef014a5c2001af60a2c28/recovered.edits/134.seqid, newMaxSeqId=134, maxSeqId=129 2024-11-22T04:38:24,214 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 46810a06844ef014a5c2001af60a2c28: Waiting for close lock at 1732250304204Running coprocessor pre-close hooks at 1732250304204Disabling compacts and flushes for region at 1732250304204Disabling writes for close at 1732250304204Writing region close event to WAL at 1732250304209 (+5 ms)Running coprocessor post-close hooks at 1732250304214 (+5 ms)Closed at 1732250304214 2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732250271473.46810a06844ef014a5c2001af60a2c28. 
2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 243e7a44d431d77d0a3c663b5e11c6ce, disabling compactions & flushes 2024-11-22T04:38:24,214 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. after waiting 0 ms 2024-11-22T04:38:24,214 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:24,214 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488->hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/7187ba4ff0ce5bd266e84d41e7ea2488/info/9fad09bd59d8439f8532e19718fe7e77-top, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/1e9a4e46a1e443f588570effe3702b8d, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bd5013fbbd6a43e4b9c875ca4be42c32, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9, 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/224c826e22e7421499e4a8d070f0bb87, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a61ddd0303624ed2b69fe88c726a23b5, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/6a628a0a7fc54cb1b2d762a9731939d3, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2a02ce6ad17249b18131741c6f8e85a8, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674] to archive 2024-11-22T04:38:24,215 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-22T04:38:24,217 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/9fad09bd59d8439f8532e19718fe7e77.7187ba4ff0ce5bd266e84d41e7ea2488 2024-11-22T04:38:24,218 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-70a8b54cef9a437aad446d6648063993 2024-11-22T04:38:24,219 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-792489f2194044c590986d2f54b3a863 2024-11-22T04:38:24,220 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/1e9a4e46a1e443f588570effe3702b8d to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/1e9a4e46a1e443f588570effe3702b8d 2024-11-22T04:38:24,221 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/TestLogRolling-testLogRolling=7187ba4ff0ce5bd266e84d41e7ea2488-dc86849cdedc412e97161eb96fe9a813 2024-11-22T04:38:24,222 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/06885fa7ca78463fb1f34af049843122 2024-11-22T04:38:24,223 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bd5013fbbd6a43e4b9c875ca4be42c32 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bd5013fbbd6a43e4b9c875ca4be42c32 2024-11-22T04:38:24,224 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/c1713887bfe54730aaff6e7fb1cef7d9 2024-11-22T04:38:24,225 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ed94a14c6d4b44c5a9b0faf4cd62981b 2024-11-22T04:38:24,226 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/224c826e22e7421499e4a8d070f0bb87 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/224c826e22e7421499e4a8d070f0bb87 2024-11-22T04:38:24,227 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0 to 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/acc2f93457f74618be3946315f5366f0 2024-11-22T04:38:24,228 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8272d2952090453bba22c6d25ca16263 2024-11-22T04:38:24,229 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a61ddd0303624ed2b69fe88c726a23b5 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a61ddd0303624ed2b69fe88c726a23b5 2024-11-22T04:38:24,230 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/b0385268549c4d5a93e9825c4635c9fa 2024-11-22T04:38:24,231 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/a894688b85134e7998e4afdcce502d71 2024-11-22T04:38:24,232 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/6a628a0a7fc54cb1b2d762a9731939d3 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/6a628a0a7fc54cb1b2d762a9731939d3 2024-11-22T04:38:24,233 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/8f25851308a846b2848baaac56004471 2024-11-22T04:38:24,234 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/bafb9277cf304d1b8f4e11ca861eee8c 2024-11-22T04:38:24,235 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2a02ce6ad17249b18131741c6f8e85a8 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2a02ce6ad17249b18131741c6f8e85a8 2024-11-22T04:38:24,236 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/ac439430aaad473d855f19a08d8712b8 2024-11-22T04:38:24,237 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/2f34090198d248d0a0e772daefca977c 2024-11-22T04:38:24,238 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674 to hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/archive/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/info/aef27304be6f48df9d1ae198fcd7b674 2024-11-22T04:38:24,238 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce.-1 {}] 
regionserver.HStore(2414): Failed to report archival of files: [1e9a4e46a1e443f588570effe3702b8d=43081, 06885fa7ca78463fb1f34af049843122=12516, bd5013fbbd6a43e4b9c875ca4be42c32=63733, c1713887bfe54730aaff6e7fb1cef7d9=17906, ed94a14c6d4b44c5a9b0faf4cd62981b=20078, 224c826e22e7421499e4a8d070f0bb87=90862, acc2f93457f74618be3946315f5366f0=16828, 8272d2952090453bba22c6d25ca16263=17906, a61ddd0303624ed2b69fe88c726a23b5=113606, b0385268549c4d5a93e9825c4635c9fa=14672, a894688b85134e7998e4afdcce502d71=17906, 6a628a0a7fc54cb1b2d762a9731939d3=146253, 8f25851308a846b2848baaac56004471=24394, bafb9277cf304d1b8f4e11ca861eee8c=12522, 2a02ce6ad17249b18131741c6f8e85a8=178732, ac439430aaad473d855f19a08d8712b8=29807, 2f34090198d248d0a0e772daefca977c=12523, aef27304be6f48df9d1ae198fcd7b674=24412] 2024-11-22T04:38:24,241 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/data/default/TestLogRolling-testLogRolling/243e7a44d431d77d0a3c663b5e11c6ce/recovered.edits/338.seqid, newMaxSeqId=338, maxSeqId=129 2024-11-22T04:38:24,241 INFO [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:24,241 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 243e7a44d431d77d0a3c663b5e11c6ce: Waiting for close lock at 1732250304214Running coprocessor pre-close hooks at 1732250304214Disabling compacts and flushes for region at 1732250304214Disabling writes for close at 1732250304214Writing region close event to WAL at 1732250304238 (+24 ms)Running coprocessor post-close hooks at 1732250304241 (+3 ms)Closed at 1732250304241 2024-11-22T04:38:24,241 DEBUG [RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732250271473.243e7a44d431d77d0a3c663b5e11c6ce. 2024-11-22T04:38:24,405 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,45737,1732250255970; all regions closed. 
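Editorial note: the StoreCloser entries above show each compacted HFile being moved out of the region's data/ tree into a parallel archive/ tree with the same table/region/family/file layout. The sketch below only illustrates that "move while preserving the relative layout" pattern with the public Hadoop FileSystem API; it is not HBase's HFileArchiver, and the root paths and class name are hypothetical.

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: move already-compacted store files from the live data tree
// into a parallel archive tree, keeping the same region/family/file layout.
public final class ArchiveCompactedFilesSketch {
  public static void archive(Configuration conf, Path dataRoot, Path archiveRoot,
      List<Path> compactedFiles) throws IOException {
    FileSystem fs = FileSystem.get(conf);
    for (Path src : compactedFiles) {
      // Rebuild the file's position relative to the data root, e.g.
      // default/TestLogRolling-testLogRolling/<region>/info/<hfile>.
      // Assumes every src really lives under dataRoot.
      String relative = src.toString().substring(dataRoot.toString().length() + 1);
      Path dst = new Path(archiveRoot, relative);
      fs.mkdirs(dst.getParent());          // make sure the archive directory exists
      if (!fs.rename(src, dst)) {          // plain rename; real code also handles collisions/retries
        throw new IOException("Failed to archive " + src + " to " + dst);
      }
    }
  }
}

Presumably the production archiver also deals with name collisions and partial failures (the WARN about "Failed to report archival of files" above is such a follow-up step), which this sketch leaves out.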
2024-11-22T04:38:24,405 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,406 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,406 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,406 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,406 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741834_1010 (size=8107) 2024-11-22T04:38:24,411 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741834_1010 (size=8107) 2024-11-22T04:38:24,415 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs 2024-11-22T04:38:24,415 INFO [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C45737%2C1732250255970.meta:.meta(num 1732250257001) 2024-11-22T04:38:24,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,416 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,416 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741878_1054 (size=780) 2024-11-22T04:38:24,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741878_1054 (size=780) 2024-11-22T04:38:24,419 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/oldWALs 2024-11-22T04:38:24,419 INFO [RS:0;8fc3ff0a63e6:45737 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C45737%2C1732250255970:(num 1732250304111) 2024-11-22T04:38:24,419 DEBUG [RS:0;8fc3ff0a63e6:45737 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:24,419 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:38:24,420 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:38:24,420 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-22T04:38:24,420 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:38:24,420 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-22T04:38:24,420 INFO [RS:0;8fc3ff0a63e6:45737 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45737 2024-11-22T04:38:24,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:38:24,433 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,45737,1732250255970 2024-11-22T04:38:24,433 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:38:24,444 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,45737,1732250255970] 2024-11-22T04:38:24,454 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,45737,1732250255970 already deleted, retry=false 2024-11-22T04:38:24,454 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,45737,1732250255970 expired; onlineServers=0 2024-11-22T04:38:24,454 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,41945,1732250255718' ***** 2024-11-22T04:38:24,454 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-22T04:38:24,454 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:38:24,454 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-22T04:38:24,454 DEBUG [M:0;8fc3ff0a63e6:41945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-22T04:38:24,454 DEBUG [M:0;8fc3ff0a63e6:41945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-22T04:38:24,455 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
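Editorial note: the ZKWatcher/RegionServerTracker entries above react to a NodeDeleted event on the region server's ephemeral znode under /hbase/rs and treat that as the server expiring. The sketch below shows that watch-and-react pattern with the plain ZooKeeper client API; it is not the HBase tracker, and the quorum string, znode path, and session timeout are hypothetical.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Illustrative only: watch an ephemeral znode and treat its deletion as the
// owning server going away.
final class EphemeralLivenessSketch {
  static ZooKeeper watchServer(String quorum, String rsZnode, Runnable onExpired)
      throws Exception {
    ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && rsZnode.equals(event.getPath())) {
        onExpired.run();                   // ephemeral node gone => server considered dead
      }
    };
    zk.exists(rsZnode, watcher);           // sets a one-shot watch on the znode
    return zk;
  }
}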
2024-11-22T04:38:24,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250256311 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250256311,5,FailOnTimeoutGroup] 2024-11-22T04:38:24,455 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250256311 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250256311,5,FailOnTimeoutGroup] 2024-11-22T04:38:24,455 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-22T04:38:24,455 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-22T04:38:24,455 DEBUG [M:0;8fc3ff0a63e6:41945 {}] master.HMaster(1795): Stopping service threads 2024-11-22T04:38:24,455 INFO [M:0;8fc3ff0a63e6:41945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-22T04:38:24,455 INFO [M:0;8fc3ff0a63e6:41945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:38:24,455 INFO [M:0;8fc3ff0a63e6:41945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-22T04:38:24,455 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-22T04:38:24,457 INFO [regionserver/8fc3ff0a63e6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-22T04:38:24,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-22T04:38:24,465 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:24,465 DEBUG [M:0;8fc3ff0a63e6:41945 {}] zookeeper.ZKUtil(347): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-22T04:38:24,465 WARN [M:0;8fc3ff0a63e6:41945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-22T04:38:24,465 INFO [M:0;8fc3ff0a63e6:41945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/.lastflushedseqids 2024-11-22T04:38:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741879_1055 (size=228) 2024-11-22T04:38:24,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741879_1055 (size=228) 2024-11-22T04:38:24,470 INFO [M:0;8fc3ff0a63e6:41945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-22T04:38:24,470 INFO [M:0;8fc3ff0a63e6:41945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-22T04:38:24,471 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:38:24,471 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:24,471 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:24,471 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:38:24,471 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:24,471 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=53.72 KB heapSize=65.96 KB 2024-11-22T04:38:24,485 DEBUG [M:0;8fc3ff0a63e6:41945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7191dc5172c44eb5ac37c45682d03729 is 82, key is hbase:meta,,1/info:regioninfo/1732250257027/Put/seqid=0 2024-11-22T04:38:24,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741880_1056 (size=5672) 2024-11-22T04:38:24,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741880_1056 (size=5672) 2024-11-22T04:38:24,490 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7191dc5172c44eb5ac37c45682d03729 2024-11-22T04:38:24,506 DEBUG [M:0;8fc3ff0a63e6:41945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7d58365ee94c4d2b9ce06c736a9daf0c is 751, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732250257584/Put/seqid=0 2024-11-22T04:38:24,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741881_1057 (size=7681) 2024-11-22T04:38:24,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741881_1057 (size=7681) 2024-11-22T04:38:24,511 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=53.12 KB at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7d58365ee94c4d2b9ce06c736a9daf0c 2024-11-22T04:38:24,515 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7d58365ee94c4d2b9ce06c736a9daf0c 2024-11-22T04:38:24,529 DEBUG [M:0;8fc3ff0a63e6:41945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d523d9adfe514e169aeb3f63d3f9f931 is 69, key is 8fc3ff0a63e6,45737,1732250255970/rs:state/1732250256436/Put/seqid=0 2024-11-22T04:38:24,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741882_1058 (size=5156) 2024-11-22T04:38:24,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741882_1058 (size=5156) 2024-11-22T04:38:24,533 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d523d9adfe514e169aeb3f63d3f9f931 2024-11-22T04:38:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:38:24,544 INFO [RS:0;8fc3ff0a63e6:45737 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:38:24,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45737-0x10160d53d010001, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:38:24,544 INFO [RS:0;8fc3ff0a63e6:45737 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,45737,1732250255970; zookeeper connection closed. 2024-11-22T04:38:24,544 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@759ca9e6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@759ca9e6 2024-11-22T04:38:24,544 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-22T04:38:24,550 DEBUG [M:0;8fc3ff0a63e6:41945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/799dfb2dd6ac499faa2831e3231eaeda is 52, key is load_balancer_on/state:d/1732250257203/Put/seqid=0 2024-11-22T04:38:24,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741883_1059 (size=5056) 2024-11-22T04:38:24,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741883_1059 (size=5056) 2024-11-22T04:38:24,555 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=129 (bloomFilter=true), to=hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/799dfb2dd6ac499faa2831e3231eaeda 2024-11-22T04:38:24,560 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/7191dc5172c44eb5ac37c45682d03729 as 
hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7191dc5172c44eb5ac37c45682d03729 2024-11-22T04:38:24,564 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/7191dc5172c44eb5ac37c45682d03729, entries=8, sequenceid=129, filesize=5.5 K 2024-11-22T04:38:24,565 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/7d58365ee94c4d2b9ce06c736a9daf0c as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7d58365ee94c4d2b9ce06c736a9daf0c 2024-11-22T04:38:24,569 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 7d58365ee94c4d2b9ce06c736a9daf0c 2024-11-22T04:38:24,569 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/7d58365ee94c4d2b9ce06c736a9daf0c, entries=14, sequenceid=129, filesize=7.5 K 2024-11-22T04:38:24,570 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d523d9adfe514e169aeb3f63d3f9f931 as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d523d9adfe514e169aeb3f63d3f9f931 2024-11-22T04:38:24,574 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d523d9adfe514e169aeb3f63d3f9f931, entries=1, sequenceid=129, filesize=5.0 K 2024-11-22T04:38:24,575 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/799dfb2dd6ac499faa2831e3231eaeda as hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/799dfb2dd6ac499faa2831e3231eaeda 2024-11-22T04:38:24,580 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41387/user/jenkins/test-data/1b807f58-0dba-e14d-f180-5fe08ba8413b/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/799dfb2dd6ac499faa2831e3231eaeda, entries=1, sequenceid=129, filesize=4.9 K 2024-11-22T04:38:24,581 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~53.72 KB/55009, heapSize ~65.90 KB/67480, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=129, compaction requested=false 2024-11-22T04:38:24,583 INFO [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:38:24,584 DEBUG [M:0;8fc3ff0a63e6:41945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250304471Disabling compacts and flushes for region at 1732250304471Disabling writes for close at 1732250304471Obtaining lock to block concurrent updates at 1732250304471Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250304471Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=55009, getHeapSize=67480, getOffHeapSize=0, getCellsCount=152 at 1732250304471Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250304472 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250304472Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250304485 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250304485Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250304493 (+8 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250304506 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250304506Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250304515 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250304528 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250304528Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250304537 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250304549 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250304549Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@18086155: reopening flushed file at 1732250304559 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c8f0f1: reopening flushed file at 1732250304564 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@47048bdf: reopening flushed file at 1732250304569 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c2f158c: reopening flushed file at 1732250304574 (+5 ms)Finished flush of dataSize ~53.72 KB/55009, heapSize ~65.90 KB/67480, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 110ms, sequenceid=129, compaction requested=false at 1732250304581 (+7 ms)Writing region close event to WAL at 1732250304583 (+2 ms)Closed at 1732250304583 2024-11-22T04:38:24,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,584 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,584 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,584 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,584 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:24,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41117 is added to blk_1073741830_1006 (size=63939) 2024-11-22T04:38:24,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34485 is added to blk_1073741830_1006 (size=63939) 2024-11-22T04:38:24,587 INFO [M:0;8fc3ff0a63e6:41945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
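Editorial note: the master-store flush above writes each column family's snapshot under a .tmp directory and then commits it by renaming the file into the family directory (…/.tmp/<file> becomes …/info/<file>, …/proc/<file>, and so on). The sketch below shows only that "write under .tmp, then rename into place" commit pattern using the Hadoop FileSystem API; the store layout, file name, and payload are hypothetical stand-ins, not HBase's flusher.

import java.io.IOException;

import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Illustrative only: readers never see a half-written file because the data is
// fully written under .tmp before the single rename that publishes it.
final class FlushCommitSketch {
  static Path flushAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws IOException {
    Path tmpDir = new Path(storeDir, ".tmp");
    Path tmpFile = new Path(tmpDir, fileName);
    Path finalFile = new Path(storeDir, fileName);

    fs.mkdirs(tmpDir);
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);                  // stand-in for writing the flushed snapshot
    }
    if (!fs.rename(tmpFile, finalFile)) {  // commit step
      throw new IOException("Commit failed for " + finalFile);
    }
    return finalFile;
  }
}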
2024-11-22T04:38:24,587 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-22T04:38:24,587 INFO [M:0;8fc3ff0a63e6:41945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41945 2024-11-22T04:38:24,587 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-22T04:38:24,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:38:24,691 INFO [M:0;8fc3ff0a63e6:41945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-22T04:38:24,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41945-0x10160d53d010000, quorum=127.0.0.1:60013, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-22T04:38:24,697 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5df314d1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:38:24,697 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a100062{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:38:24,697 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:38:24,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@48123521{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:38:24,698 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60ddaed5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,STOPPED} 2024-11-22T04:38:24,701 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:38:24,701 WARN [BP-1915570739-172.17.0.2-1732250253374 heartbeating to localhost/127.0.0.1:41387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:38:24,701 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:38:24,701 WARN [BP-1915570739-172.17.0.2-1732250253374 heartbeating to localhost/127.0.0.1:41387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1915570739-172.17.0.2-1732250253374 (Datanode Uuid 7db8585e-eb95-4561-af0c-229b4745743f) service to localhost/127.0.0.1:41387 2024-11-22T04:38:24,702 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data3/current/BP-1915570739-172.17.0.2-1732250253374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:38:24,702 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data4/current/BP-1915570739-172.17.0.2-1732250253374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:38:24,702 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:38:24,704 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@49bf1df8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:38:24,705 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7d4c2da4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:38:24,705 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:38:24,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49490ce4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:38:24,705 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27d3c271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,STOPPED} 2024-11-22T04:38:24,707 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-22T04:38:24,707 WARN [BP-1915570739-172.17.0.2-1732250253374 heartbeating to localhost/127.0.0.1:41387 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-22T04:38:24,707 WARN [BP-1915570739-172.17.0.2-1732250253374 heartbeating to localhost/127.0.0.1:41387 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1915570739-172.17.0.2-1732250253374 (Datanode Uuid cfefd538-19df-479f-baf7-bb581da100ed) service to localhost/127.0.0.1:41387 2024-11-22T04:38:24,707 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-22T04:38:24,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data1/current/BP-1915570739-172.17.0.2-1732250253374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:38:24,707 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/cluster_a0b9ba52-7c91-1fc9-c6a0-3c04a754b7bf/data/data2/current/BP-1915570739-172.17.0.2-1732250253374 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-22T04:38:24,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-22T04:38:24,712 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@417c1a7a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:38:24,713 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@78512cf7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-22T04:38:24,713 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-22T04:38:24,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@41a74ab6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-22T04:38:24,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@64d2170c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir/,STOPPED} 2024-11-22T04:38:24,719 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-22T04:38:24,747 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-22T04:38:24,757 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 205) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins.hfs.6@localhost:41387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41387 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41387 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:41387 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41387 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=509 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=211 (was 228), ProcessCount=11 (was 11), AvailableMemoryMB=9035 (was 8310) - AvailableMemoryMB LEAK? 
- 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=509, MaxFileDescriptor=1048576, SystemLoadAverage=211, ProcessCount=11, AvailableMemoryMB=9035 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.log.dir so I do NOT create it in target/test-data/359f0180-2956-5273-95c4-a622a6bd957c 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/88699cda-93ad-f4a9-c394-13d5e3265174/hadoop.tmp.dir so I do NOT create it in target/test-data/359f0180-2956-5273-95c4-a622a6bd957c 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921, deleteOnExit=true 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-22T04:38:24,764 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/test.cache.data in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.tmp.dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-22T04:38:24,765 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:38:24,765 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-22T04:38:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/nfs.dump.dir in system properties and HBase conf 2024-11-22T04:38:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/java.io.tmpdir in system properties and HBase conf 2024-11-22T04:38:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-22T04:38:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-22T04:38:24,766 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-22T04:38:24,778 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:38:24,969 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:24,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:25,103 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:38:25,106 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:38:25,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:38:25,107 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:38:25,107 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:38:25,108 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:38:25,108 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@a8a2fb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:38:25,109 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@663b7fb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:38:25,199 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@534394c4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/java.io.tmpdir/jetty-localhost-46549-hadoop-hdfs-3_4_1-tests_jar-_-any-9065087416248476865/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-22T04:38:25,200 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7acdff1a{HTTP/1.1, (http/1.1)}{localhost:46549} 2024-11-22T04:38:25,200 INFO [Time-limited test {}] server.Server(415): Started @302466ms 2024-11-22T04:38:25,225 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-22T04:38:25,506 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:38:25,509 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:38:25,509 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:38:25,509 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:38:25,510 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-22T04:38:25,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20dd8a9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:38:25,510 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59532081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:38:25,603 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@616bab4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/java.io.tmpdir/jetty-localhost-46007-hadoop-hdfs-3_4_1-tests_jar-_-any-7888628725215868973/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:38:25,604 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2269c58e{HTTP/1.1, (http/1.1)}{localhost:46007} 2024-11-22T04:38:25,604 INFO [Time-limited test {}] server.Server(415): Started @302871ms 2024-11-22T04:38:25,605 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:38:25,629 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-22T04:38:25,631 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-22T04:38:25,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-22T04:38:25,632 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-22T04:38:25,632 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-22T04:38:25,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@290d7c20{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,AVAILABLE} 2024-11-22T04:38:25,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c78c12c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-22T04:38:25,724 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@62b479b9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/java.io.tmpdir/jetty-localhost-38347-hadoop-hdfs-3_4_1-tests_jar-_-any-6980663496185935086/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-22T04:38:25,724 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6724e6e5{HTTP/1.1, (http/1.1)}{localhost:38347} 2024-11-22T04:38:25,724 INFO [Time-limited test {}] server.Server(415): Started @302991ms 2024-11-22T04:38:25,725 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-22T04:38:25,970 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:25,973 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:26,776 WARN [Thread-2490 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data1/current/BP-2097659817-172.17.0.2-1732250304782/current, will proceed with Du for space computation calculation, 2024-11-22T04:38:26,776 WARN [Thread-2491 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data2/current/BP-2097659817-172.17.0.2-1732250304782/current, will proceed with Du for space computation calculation, 2024-11-22T04:38:26,799 WARN [Thread-2454 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:38:26,801 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f23b53a91381074 with lease ID 0x38f9a43029750e56: Processing first storage report for DS-7dd3e8cf-492d-4dbc-990c-67178be563e4 from datanode DatanodeRegistration(127.0.0.1:35201, datanodeUuid=0721f1bf-f570-4604-b27e-b4cc2f897206, infoPort=43949, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782) 2024-11-22T04:38:26,801 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f23b53a91381074 with lease ID 0x38f9a43029750e56: from storage DS-7dd3e8cf-492d-4dbc-990c-67178be563e4 node DatanodeRegistration(127.0.0.1:35201, datanodeUuid=0721f1bf-f570-4604-b27e-b4cc2f897206, infoPort=43949, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:38:26,801 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4f23b53a91381074 with lease ID 0x38f9a43029750e56: Processing first storage report for DS-8783b4ee-537e-4c96-b6c3-20a5d7eec80a from datanode DatanodeRegistration(127.0.0.1:35201, datanodeUuid=0721f1bf-f570-4604-b27e-b4cc2f897206, infoPort=43949, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782) 2024-11-22T04:38:26,801 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4f23b53a91381074 with lease ID 0x38f9a43029750e56: from storage DS-8783b4ee-537e-4c96-b6c3-20a5d7eec80a node DatanodeRegistration(127.0.0.1:35201, datanodeUuid=0721f1bf-f570-4604-b27e-b4cc2f897206, infoPort=43949, infoSecurePort=0, ipcPort=45225, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-22T04:38:26,888 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data3/current/BP-2097659817-172.17.0.2-1732250304782/current, will proceed with Du for space computation calculation, 2024-11-22T04:38:26,888 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data4/current/BP-2097659817-172.17.0.2-1732250304782/current, will proceed with Du for space computation calculation, 2024-11-22T04:38:26,909 WARN [Thread-2477 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-22T04:38:26,911 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8eadf08bc173e8d with lease ID 0x38f9a43029750e57: Processing first storage report for DS-91e5c62e-7d6e-41a2-92bd-445b1520e494 from datanode DatanodeRegistration(127.0.0.1:40183, datanodeUuid=1b97dde5-f2b7-4395-810a-c5fcd4a8e1b3, infoPort=45153, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782) 2024-11-22T04:38:26,911 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8eadf08bc173e8d with lease ID 0x38f9a43029750e57: from storage DS-91e5c62e-7d6e-41a2-92bd-445b1520e494 node DatanodeRegistration(127.0.0.1:40183, datanodeUuid=1b97dde5-f2b7-4395-810a-c5fcd4a8e1b3, infoPort=45153, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:38:26,912 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8eadf08bc173e8d with lease ID 0x38f9a43029750e57: Processing first storage report for DS-d4bf2aaa-314d-4b0e-999a-35a6fc22d6d7 from datanode DatanodeRegistration(127.0.0.1:40183, datanodeUuid=1b97dde5-f2b7-4395-810a-c5fcd4a8e1b3, infoPort=45153, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782) 2024-11-22T04:38:26,912 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8eadf08bc173e8d with lease ID 0x38f9a43029750e57: from storage DS-d4bf2aaa-314d-4b0e-999a-35a6fc22d6d7 node DatanodeRegistration(127.0.0.1:40183, datanodeUuid=1b97dde5-f2b7-4395-810a-c5fcd4a8e1b3, infoPort=45153, infoSecurePort=0, ipcPort=33773, storageInfo=lv=-57;cid=testClusterID;nsid=283467109;c=1732250304782), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-22T04:38:26,955 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c 2024-11-22T04:38:26,959 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/zookeeper_0, clientPort=51200, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-22T04:38:26,960 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=51200 2024-11-22T04:38:26,960 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:26,962 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:26,971 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:26,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:38:26,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741825_1001 (size=7) 2024-11-22T04:38:26,974 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979 with version=8 2024-11-22T04:38:26,974 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:37209/user/jenkins/test-data/43189c1c-ff6f-af8b-10ec-edd5570a10f4/hbase-staging 2024-11-22T04:38:26,974 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:26,976 INFO [Time-limited test {}] client.ConnectionUtils(128): master/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:38:26,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:26,976 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:38:26,977 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32803 2024-11-22T04:38:26,978 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32803 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-11-22T04:38:27,026 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:328030x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:38:27,026 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32803-0x10160d605390000 connected 2024-11-22T04:38:27,107 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:27,110 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:27,115 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:38:27,115 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979, hbase.cluster.distributed=false 2024-11-22T04:38:27,119 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:38:27,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-22T04:38:27,120 
DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32803 2024-11-22T04:38:27,120 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32803 2024-11-22T04:38:27,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-22T04:38:27,121 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32803 2024-11-22T04:38:27,136 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/8fc3ff0a63e6:0 server-side Connection retries=45 2024-11-22T04:38:27,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:27,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:27,136 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-22T04:38:27,136 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-22T04:38:27,137 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-22T04:38:27,137 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-22T04:38:27,137 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-22T04:38:27,137 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37065 2024-11-22T04:38:27,138 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37065 connecting to ZooKeeper ensemble=127.0.0.1:51200 2024-11-22T04:38:27,139 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:27,140 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:27,149 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:370650x0, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-22T04:38:27,149 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37065-0x10160d605390001 connected 2024-11-22T04:38:27,149 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet 
exist, /hbase/running 2024-11-22T04:38:27,149 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-22T04:38:27,150 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-22T04:38:27,150 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-22T04:38:27,151 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-22T04:38:27,151 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37065 2024-11-22T04:38:27,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37065 2024-11-22T04:38:27,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37065 2024-11-22T04:38:27,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37065 2024-11-22T04:38:27,152 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37065 2024-11-22T04:38:27,164 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;8fc3ff0a63e6:32803 2024-11-22T04:38:27,165 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:38:27,170 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:38:27,170 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-22T04:38:27,180 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,181 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
zookeeper.ZKUtil(111): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-22T04:38:27,182 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/8fc3ff0a63e6,32803,1732250306976 from backup master directory 2024-11-22T04:38:27,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:38:27,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-22T04:38:27,191 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:38:27,191 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,197 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/hbase.id] with ID: c4b45ed1-2b58-4790-aed8-d8692505cc21 2024-11-22T04:38:27,197 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/.tmp/hbase.id 2024-11-22T04:38:27,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:38:27,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741826_1002 (size=42) 2024-11-22T04:38:27,208 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/.tmp/hbase.id]:[hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/hbase.id] 2024-11-22T04:38:27,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:27,222 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-22T04:38:27,223 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
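The cluster ID bootstrap logged above (FSUtils 620/625/634) follows a write-to-temp-then-rename pattern: the ID is first written under .tmp and only then moved onto hbase.id, so a reader never observes a partially written file. A minimal sketch of that pattern against the Hadoop FileSystem API, with an illustrative class name and paths rather than the actual FSUtils implementation:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdFileSketch {

  // Write the ID to a temp file first, then rename it into place; rename is atomic on HDFS.
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path target = new Path(rootDir, "hbase.id");
    Path tmp = new Path(new Path(rootDir, ".tmp"), "hbase.id");
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, target)) {
      throw new IOException("rename " + tmp + " -> " + target + " failed");
    }
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();        // resolves fs.defaultFS (local FS by default)
    try (FileSystem fs = FileSystem.get(conf)) {
      writeClusterId(fs, new Path("/tmp/hbase-id-sketch"), UUID.randomUUID().toString());
    }
  }
}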
2024-11-22T04:38:27,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,233 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:38:27,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741827_1003 (size=196) 2024-11-22T04:38:27,240 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-22T04:38:27,241 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-22T04:38:27,241 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:38:27,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:38:27,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741828_1004 (size=1189) 2024-11-22T04:38:27,251 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store 2024-11-22T04:38:27,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:38:27,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741829_1005 (size=34) 2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-22T04:38:27,259 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-22T04:38:27,259 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-22T04:38:27,259 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732250307259Disabling compacts and flushes for region at 1732250307259Disabling writes for close at 1732250307259Writing region close event to WAL at 1732250307259Closed at 1732250307259 2024-11-22T04:38:27,260 WARN [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/.initializing 2024-11-22T04:38:27,260 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/WALs/8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,262 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C32803%2C1732250306976, suffix=, logDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/WALs/8fc3ff0a63e6,32803,1732250306976, archiveDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/oldWALs, maxLogs=10 2024-11-22T04:38:27,263 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C32803%2C1732250306976.1732250307263 2024-11-22T04:38:27,267 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/WALs/8fc3ff0a63e6,32803,1732250306976/8fc3ff0a63e6%2C32803%2C1732250306976.1732250307263 2024-11-22T04:38:27,268 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:43949:43949)] 2024-11-22T04:38:27,271 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:38:27,272 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:38:27,272 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,272 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,273 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,274 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-22T04:38:27,274 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:27,275 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-22T04:38:27,276 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:38:27,276 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,277 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-22T04:38:27,277 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:38:27,278 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-22T04:38:27,279 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,279 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-22T04:38:27,279 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,280 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,280 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,281 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,281 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,281 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-22T04:38:27,282 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-22T04:38:27,284 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:38:27,284 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=767493, jitterRate=-0.02408255636692047}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-22T04:38:27,284 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732250307272Initializing all the Stores at 1732250307273 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250307273Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250307273Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250307273Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250307273Cleaning up temporary data from old regions at 1732250307281 (+8 ms)Region opened successfully at 1732250307284 (+3 ms) 2024-11-22T04:38:27,285 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-22T04:38:27,287 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4730b0d0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-22T04:38:27,288 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-22T04:38:27,290 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-22T04:38:27,291 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-22T04:38:27,296 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-22T04:38:27,296 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-22T04:38:27,297 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-22T04:38:27,306 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-22T04:38:27,307 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-22T04:38:27,307 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-22T04:38:27,317 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-22T04:38:27,318 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-22T04:38:27,327 DEBUG 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-22T04:38:27,330 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-22T04:38:27,338 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-22T04:38:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,350 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=8fc3ff0a63e6,32803,1732250306976, sessionid=0x10160d605390000, setting cluster-up flag (Was=false) 2024-11-22T04:38:27,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,370 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,402 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-22T04:38:27,405 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,444 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:27,475 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-22T04:38:27,479 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:27,481 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-22T04:38:27,483 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-22T04:38:27,484 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-22T04:38:27,484 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-22T04:38:27,484 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 8fc3ff0a63e6,32803,1732250306976 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=5, maxPoolSize=5 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/8fc3ff0a63e6:0, corePoolSize=10, maxPoolSize=10 2024-11-22T04:38:27,486 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,487 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:38:27,487 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/8fc3ff0a63e6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732250337487 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-22T04:38:27,488 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,488 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-22T04:38:27,488 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-22T04:38:27,489 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250307489,5,FailOnTimeoutGroup] 2024-11-22T04:38:27,489 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250307489,5,FailOnTimeoutGroup] 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-22T04:38:27,489 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,489 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,489 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-22T04:38:27,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:38:27,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741831_1007 (size=1321) 2024-11-22T04:38:27,497 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-22T04:38:27,497 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979 2024-11-22T04:38:27,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:38:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741832_1008 (size=32) 2024-11-22T04:38:27,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:38:27,506 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:38:27,507 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:38:27,507 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:27,508 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:38:27,509 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:38:27,509 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:27,509 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:38:27,510 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:38:27,510 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:27,510 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:38:27,511 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:38:27,511 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:27,511 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:27,511 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:38:27,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740 2024-11-22T04:38:27,512 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740 2024-11-22T04:38:27,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:38:27,513 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:38:27,513 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:38:27,514 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:38:27,516 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-22T04:38:27,516 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829362, jitterRate=0.054588764905929565}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732250307505Initializing all the Stores at 1732250307505Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250307505Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250307506 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250307506Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250307506Cleaning up temporary data from old regions at 1732250307513 (+7 ms)Region opened successfully at 1732250307516 (+3 ms) 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-22T04:38:27,517 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-22T04:38:27,517 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-22T04:38:27,517 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732250307517Disabling compacts and flushes for region at 1732250307517Disabling writes for close at 1732250307517Writing region close 
event to WAL at 1732250307517Closed at 1732250307517 2024-11-22T04:38:27,518 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:38:27,518 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-22T04:38:27,518 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-22T04:38:27,519 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:38:27,520 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-22T04:38:27,555 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(746): ClusterId : c4b45ed1-2b58-4790-aed8-d8692505cc21 2024-11-22T04:38:27,555 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-22T04:38:27,561 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-22T04:38:27,561 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-22T04:38:27,572 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-22T04:38:27,573 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c2fb2a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=8fc3ff0a63e6/172.17.0.2:0 2024-11-22T04:38:27,588 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;8fc3ff0a63e6:37065 2024-11-22T04:38:27,588 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-22T04:38:27,588 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-22T04:38:27,588 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-22T04:38:27,589 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(2659): reportForDuty to master=8fc3ff0a63e6,32803,1732250306976 with port=37065, startcode=1732250307136 2024-11-22T04:38:27,589 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-22T04:38:27,591 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50057, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-22T04:38:27,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32803 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,592 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32803 {}] master.ServerManager(517): Registering regionserver=8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,593 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979 2024-11-22T04:38:27,593 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44051 2024-11-22T04:38:27,593 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-22T04:38:27,601 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-22T04:38:27,602 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] zookeeper.ZKUtil(111): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,602 WARN [RS:0;8fc3ff0a63e6:37065 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-22T04:38:27,602 INFO [RS:0;8fc3ff0a63e6:37065 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:38:27,602 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,602 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [8fc3ff0a63e6,37065,1732250307136] 2024-11-22T04:38:27,606 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-22T04:38:27,608 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-22T04:38:27,608 INFO [RS:0;8fc3ff0a63e6:37065 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-22T04:38:27,608 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-22T04:38:27,608 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-22T04:38:27,609 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-22T04:38:27,609 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=2, maxPoolSize=2 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,609 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/8fc3ff0a63e6:0, corePoolSize=1, maxPoolSize=1 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:38:27,610 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/8fc3ff0a63e6:0, corePoolSize=3, maxPoolSize=3 2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,610 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37065,1732250307136-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:38:27,626 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-22T04:38:27,626 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,37065,1732250307136-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,626 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,626 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.Replication(171): 8fc3ff0a63e6,37065,1732250307136 started 2024-11-22T04:38:27,638 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:27,638 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1482): Serving as 8fc3ff0a63e6,37065,1732250307136, RpcServer on 8fc3ff0a63e6/172.17.0.2:37065, sessionid=0x10160d605390001 2024-11-22T04:38:27,638 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-22T04:38:27,638 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,638 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37065,1732250307136' 2024-11-22T04:38:27,638 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '8fc3ff0a63e6,37065,1732250307136' 2024-11-22T04:38:27,639 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-22T04:38:27,639 DEBUG 
[RS:0;8fc3ff0a63e6:37065 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-22T04:38:27,640 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-22T04:38:27,640 INFO [RS:0;8fc3ff0a63e6:37065 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-22T04:38:27,640 INFO [RS:0;8fc3ff0a63e6:37065 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-22T04:38:27,670 WARN [8fc3ff0a63e6:32803 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-22T04:38:27,744 INFO [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37065%2C1732250307136, suffix=, logDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/8fc3ff0a63e6,37065,1732250307136, archiveDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs, maxLogs=32 2024-11-22T04:38:27,745 INFO [RS:0;8fc3ff0a63e6:37065 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37065%2C1732250307136.1732250307745 2024-11-22T04:38:27,757 INFO [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/8fc3ff0a63e6,37065,1732250307136/8fc3ff0a63e6%2C37065%2C1732250307136.1732250307745 2024-11-22T04:38:27,759 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45153:45153),(127.0.0.1/127.0.0.1:43949:43949)] 2024-11-22T04:38:27,920 DEBUG [8fc3ff0a63e6:32803 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-22T04:38:27,921 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:27,924 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37065,1732250307136, state=OPENING 2024-11-22T04:38:27,972 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,38269,1732250115127/8fc3ff0a63e6%2C38269%2C1732250115127.1732250115358 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-22T04:38:27,975 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:39459/user/jenkins/test-data/786ae595-f2d2-662d-5fa1-d78868b77da0/WALs/8fc3ff0a63e6,37839,1732250113358/8fc3ff0a63e6%2C37839%2C1732250113358.meta.1732250114864.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor106.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-3.0.0-beta-2-SNAPSHOT.jar:3.0.0-beta-2-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-22T04:38:28,002 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-22T04:38:28,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:28,012 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:28,014 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-22T04:38:28,014 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:38:28,014 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:38:28,015 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37065,1732250307136}] 2024-11-22T04:38:28,171 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-22T04:38:28,175 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41725, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-22T04:38:28,180 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-22T04:38:28,180 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:38:28,182 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=8fc3ff0a63e6%2C37065%2C1732250307136.meta, suffix=.meta, logDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/8fc3ff0a63e6,37065,1732250307136, archiveDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs, maxLogs=32 2024-11-22T04:38:28,183 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 8fc3ff0a63e6%2C37065%2C1732250307136.meta.1732250308183.meta 2024-11-22T04:38:28,188 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/8fc3ff0a63e6,37065,1732250307136/8fc3ff0a63e6%2C37065%2C1732250307136.meta.1732250308183.meta 2024-11-22T04:38:28,192 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43949:43949),(127.0.0.1/127.0.0.1:45153:45153)] 
2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-22T04:38:28,197 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-22T04:38:28,197 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-22T04:38:28,199 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-22T04:38:28,199 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-22T04:38:28,199 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:28,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:28,200 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, 
cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-22T04:38:28,200 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-22T04:38:28,200 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:28,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:28,201 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-22T04:38:28,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-22T04:38:28,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:28,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:28,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-22T04:38:28,203 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); 
ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-22T04:38:28,203 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-22T04:38:28,203 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-22T04:38:28,203 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-22T04:38:28,204 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740 2024-11-22T04:38:28,204 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740 2024-11-22T04:38:28,205 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-22T04:38:28,205 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-22T04:38:28,206 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-22T04:38:28,207 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-22T04:38:28,207 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=794618, jitterRate=0.01040966808795929}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-22T04:38:28,207 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-22T04:38:28,208 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732250308197Writing region info on filesystem at 1732250308197Initializing all the Stores at 1732250308198 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250308198Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250308198Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732250308198Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732250308198Cleaning up temporary data from old regions at 1732250308205 (+7 ms)Running coprocessor post-open hooks at 1732250308207 (+2 ms)Region opened successfully at 1732250308208 (+1 ms) 2024-11-22T04:38:28,209 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732250308170 2024-11-22T04:38:28,210 DEBUG [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-22T04:38:28,210 INFO [RS_OPEN_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-22T04:38:28,211 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:28,211 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 8fc3ff0a63e6,37065,1732250307136, state=OPEN 2024-11-22T04:38:28,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:38:28,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-22T04:38:28,247 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:28,248 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:38:28,248 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-22T04:38:28,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-22T04:38:28,251 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=8fc3ff0a63e6,37065,1732250307136 in 233 msec 2024-11-22T04:38:28,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-22T04:38:28,255 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 733 msec 2024-11-22T04:38:28,256 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-22T04:38:28,256 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-22T04:38:28,258 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:38:28,258 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37065,1732250307136, seqNum=-1] 2024-11-22T04:38:28,258 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:38:28,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49917, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:38:28,266 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 782 msec 2024-11-22T04:38:28,266 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732250308266, completionTime=-1 2024-11-22T04:38:28,266 INFO 
[master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-22T04:38:28,266 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732250368269 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732250428269 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,269 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,270 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-8fc3ff0a63e6:32803, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,270 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,270 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,272 DEBUG [master/8fc3ff0a63e6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.085sec 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-22T04:38:28,276 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-22T04:38:28,277 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-22T04:38:28,280 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-22T04:38:28,280 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-22T04:38:28,280 INFO [master/8fc3ff0a63e6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=8fc3ff0a63e6,32803,1732250306976-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-22T04:38:28,356 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70f66986, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:38:28,356 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 8fc3ff0a63e6,32803,-1 for getting cluster id 2024-11-22T04:38:28,357 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-22T04:38:28,360 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'c4b45ed1-2b58-4790-aed8-d8692505cc21' 2024-11-22T04:38:28,360 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-22T04:38:28,361 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "c4b45ed1-2b58-4790-aed8-d8692505cc21" 2024-11-22T04:38:28,361 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bff9f70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:38:28,361 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [8fc3ff0a63e6,32803,-1] 2024-11-22T04:38:28,362 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-22T04:38:28,362 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:28,363 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50728, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-22T04:38:28,363 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b8e009f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-22T04:38:28,364 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-22T04:38:28,364 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=8fc3ff0a63e6,37065,1732250307136, seqNum=-1] 2024-11-22T04:38:28,365 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-22T04:38:28,365 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43706, version=3.0.0-beta-2-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-22T04:38:28,367 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:28,367 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-22T04:38:28,369 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-22T04:38:28,369 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-22T04:38:28,371 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1, archiveDir=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs, maxLogs=32 2024-11-22T04:38:28,372 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732250308371 2024-11-22T04:38:28,377 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1/test.com%2C8080%2C1.1732250308371 2024-11-22T04:38:28,378 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43949:43949),(127.0.0.1/127.0.0.1:45153:45153)] 2024-11-22T04:38:28,379 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732250308379 2024-11-22T04:38:28,386 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,386 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,386 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,386 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,386 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,386 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1/test.com%2C8080%2C1.1732250308371 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1/test.com%2C8080%2C1.1732250308379 2024-11-22T04:38:28,388 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43949:43949),(127.0.0.1/127.0.0.1:45153:45153)] 2024-11-22T04:38:28,388 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1/test.com%2C8080%2C1.1732250308371 is not closed yet, will try archiving it next time 2024-11-22T04:38:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741835_1011 (size=93) 2024-11-22T04:38:28,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741835_1011 (size=93) 2024-11-22T04:38:28,389 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,389 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,389 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,389 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,389 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-22T04:38:28,390 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/WALs/test.com,8080,1/test.com%2C8080%2C1.1732250308371 to hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs/test.com%2C8080%2C1.1732250308371 2024-11-22T04:38:28,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741836_1012 (size=93) 2024-11-22T04:38:28,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741836_1012 (size=93) 2024-11-22T04:38:28,392 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs 2024-11-22T04:38:28,392 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732250308379) 2024-11-22T04:38:28,392 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-22T04:38:28,393 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-22T04:38:28,393 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:28,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:28,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:28,393 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-22T04:38:28,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-22T04:38:28,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=79769275, stopped=false 2024-11-22T04:38:28,393 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=8fc3ff0a63e6,32803,1732250306976 2024-11-22T04:38:28,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:28,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-22T04:38:28,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:28,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-22T04:38:28,412 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-22T04:38:28,412 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-22T04:38:28,412 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:28,412 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:28,412 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '8fc3ff0a63e6,37065,1732250307136' ***** 2024-11-22T04:38:28,412 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:38:28,412 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-22T04:38:28,413 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-22T04:38:28,413 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(959): stopping server 8fc3ff0a63e6,37065,1732250307136 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;8fc3ff0a63e6:37065. 
2024-11-22T04:38:28,413 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-22T04:38:28,413 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-22T04:38:28,413 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740
2024-11-22T04:38:28,414 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close
2024-11-22T04:38:28,414 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740}
2024-11-22T04:38:28,414 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1351): Waiting on 1588230740
2024-11-22T04:38:28,414 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes
2024-11-22T04:38:28,414 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740
2024-11-22T04:38:28,414 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740
2024-11-22T04:38:28,414 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms
2024-11-22T04:38:28,414 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740
2024-11-22T04:38:28,414 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB
2024-11-22T04:38:28,430 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/.tmp/ns/577987baa1ce47fb8cdbb665523706cf is 43, key is default/ns:d/1732250308260/Put/seqid=0
2024-11-22T04:38:28,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741837_1013 (size=5153)
2024-11-22T04:38:28,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741837_1013 (size=5153)
2024-11-22T04:38:28,435 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/.tmp/ns/577987baa1ce47fb8cdbb665523706cf
2024-11-22T04:38:28,440 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/.tmp/ns/577987baa1ce47fb8cdbb665523706cf as hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/ns/577987baa1ce47fb8cdbb665523706cf
2024-11-22T04:38:28,444 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/ns/577987baa1ce47fb8cdbb665523706cf, entries=2, sequenceid=6, filesize=5.0 K
2024-11-22T04:38:28,445 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false
2024-11-22T04:38:28,445 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta'
2024-11-22T04:38:28,449 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1
2024-11-22T04:38:28,450 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-22T04:38:28,450 INFO [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-22T04:38:28,450 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740:
    Waiting for close lock at 1732250308414
    Running coprocessor pre-close hooks at 1732250308414
    Disabling compacts and flushes for region at 1732250308414
    Disabling writes for close at 1732250308414
    Obtaining lock to block concurrent updates at 1732250308414
    Preparing flush snapshotting stores in 1588230740 at 1732250308414
    Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732250308415 (+1 ms)
    Flushing stores of hbase:meta,,1.1588230740 at 1732250308415
    Flushing 1588230740/ns: creating writer at 1732250308415
    Flushing 1588230740/ns: appending metadata at 1732250308430 (+15 ms)
    Flushing 1588230740/ns: closing flushed file at 1732250308430
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@635efdd5: reopening flushed file at 1732250308439 (+9 ms)
    Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=6, compaction requested=false at 1732250308445 (+6 ms)
    Writing region close event to WAL at 1732250308446 (+1 ms)
    Running coprocessor post-close hooks at 1732250308450 (+4 ms)
    Closed at 1732250308450
2024-11-22T04:38:28,450 DEBUG [RS_CLOSE_META-regionserver/8fc3ff0a63e6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-22T04:38:28,614 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(976): stopping server 8fc3ff0a63e6,37065,1732250307136; all regions closed.
2024-11-22T04:38:28,615 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,615 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,616 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,616 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741834_1010 (size=1152)
2024-11-22T04:38:28,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741834_1010 (size=1152)
2024-11-22T04:38:28,623 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs
2024-11-22T04:38:28,623 INFO [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37065%2C1732250307136.meta:.meta(num 1732250308183)
2024-11-22T04:38:28,624 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,624 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,624 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,624 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,624 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741833_1009 (size=93)
2024-11-22T04:38:28,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741833_1009 (size=93)
2024-11-22T04:38:28,628 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/oldWALs
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 8fc3ff0a63e6%2C37065%2C1732250307136:(num 1732250307745)
2024-11-22T04:38:28,628 DEBUG [RS:0;8fc3ff0a63e6:37065 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.LeaseManager(133): Closed leases
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.ChoreService(370): Chore service for: regionserver/8fc3ff0a63e6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-22T04:38:28,628 INFO [regionserver/8fc3ff0a63e6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-22T04:38:28,628 INFO [RS:0;8fc3ff0a63e6:37065 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37065
2024-11-22T04:38:28,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs
2024-11-22T04:38:28,633 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/8fc3ff0a63e6,37065,1732250307136
2024-11-22T04:38:28,633 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-22T04:38:28,633 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [8fc3ff0a63e6,37065,1732250307136]
2024-11-22T04:38:28,654 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/8fc3ff0a63e6,37065,1732250307136 already deleted, retry=false
2024-11-22T04:38:28,654 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 8fc3ff0a63e6,37065,1732250307136 expired; onlineServers=0
2024-11-22T04:38:28,654 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '8fc3ff0a63e6,32803,1732250306976' *****
2024-11-22T04:38:28,654 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0
2024-11-22T04:38:28,654 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.HBaseServerBase(455): Close async cluster connection
2024-11-22T04:38:28,654 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service
2024-11-22T04:38:28,654 DEBUG [M:0;8fc3ff0a63e6:32803 {}] cleaner.LogCleaner(198): Cancelling LogCleaner
2024-11-22T04:38:28,654 DEBUG [M:0;8fc3ff0a63e6:32803 {}] cleaner.HFileCleaner(335): Stopping file delete threads
2024-11-22T04:38:28,654 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting.
2024-11-22T04:38:28,654 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250307489 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.large.0-1732250307489,5,FailOnTimeoutGroup]
2024-11-22T04:38:28,654 DEBUG [master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250307489 {}] cleaner.HFileCleaner(306): Exit Thread[master/8fc3ff0a63e6:0:becomeActiveMaster-HFileCleaner.small.0-1732250307489,5,FailOnTimeoutGroup]
2024-11-22T04:38:28,654 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.ChoreService(370): Chore service for: master/8fc3ff0a63e6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown
2024-11-22T04:38:28,655 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.HBaseServerBase(448): Shutdown executor service
2024-11-22T04:38:28,655 DEBUG [M:0;8fc3ff0a63e6:32803 {}] master.HMaster(1795): Stopping service threads
2024-11-22T04:38:28,655 INFO [M:0;8fc3ff0a63e6:32803 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher
2024-11-22T04:38:28,655 INFO [M:0;8fc3ff0a63e6:32803 {}] procedure2.ProcedureExecutor(723): Stopping
2024-11-22T04:38:28,655 INFO [M:0;8fc3ff0a63e6:32803 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false
2024-11-22T04:38:28,655 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating.
2024-11-22T04:38:28,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master
2024-11-22T04:38:28,664 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase
2024-11-22T04:38:28,664 DEBUG [M:0;8fc3ff0a63e6:32803 {}] zookeeper.ZKUtil(347): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error)
2024-11-22T04:38:28,665 WARN [M:0;8fc3ff0a63e6:32803 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null
2024-11-22T04:38:28,665 INFO [M:0;8fc3ff0a63e6:32803 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/.lastflushedseqids
2024-11-22T04:38:28,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741838_1014 (size=99)
2024-11-22T04:38:28,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741838_1014 (size=99)
2024-11-22T04:38:28,671 INFO [M:0;8fc3ff0a63e6:32803 {}] assignment.AssignmentManager(395): Stopping assignment manager
2024-11-22T04:38:28,672 INFO [M:0;8fc3ff0a63e6:32803 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false
2024-11-22T04:38:28,672 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes
2024-11-22T04:38:28,672 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T04:38:28,672 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T04:38:28,672 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms
2024-11-22T04:38:28,672 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T04:38:28,672 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB
2024-11-22T04:38:28,686 DEBUG [M:0;8fc3ff0a63e6:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed660ffe7e3342539547adefdfb6fd61 is 82, key is hbase:meta,,1/info:regioninfo/1732250308211/Put/seqid=0
2024-11-22T04:38:28,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741839_1015 (size=5672)
2024-11-22T04:38:28,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741839_1015 (size=5672)
2024-11-22T04:38:28,690 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed660ffe7e3342539547adefdfb6fd61
2024-11-22T04:38:28,706 DEBUG [M:0;8fc3ff0a63e6:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bd49a8d72064a80a30070b5d99bb961 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732250308265/Put/seqid=0
2024-11-22T04:38:28,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741840_1016 (size=5275)
2024-11-22T04:38:28,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741840_1016 (size=5275)
2024-11-22T04:38:28,711 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bd49a8d72064a80a30070b5d99bb961
2024-11-22T04:38:28,727 DEBUG [M:0;8fc3ff0a63e6:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e9395de618f4ed3803530be73659616 is 69, key is 8fc3ff0a63e6,37065,1732250307136/rs:state/1732250307592/Put/seqid=0
2024-11-22T04:38:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741841_1017 (size=5156)
2024-11-22T04:38:28,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741841_1017 (size=5156)
2024-11-22T04:38:28,732 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e9395de618f4ed3803530be73659616
2024-11-22T04:38:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T04:38:28,744 INFO [RS:0;8fc3ff0a63e6:37065 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-22T04:38:28,744 INFO [RS:0;8fc3ff0a63e6:37065 {}] regionserver.HRegionServer(1031): Exiting; stopping=8fc3ff0a63e6,37065,1732250307136; zookeeper connection closed.
2024-11-22T04:38:28,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37065-0x10160d605390001, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T04:38:28,744 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7b96541e {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7b96541e
2024-11-22T04:38:28,744 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete
2024-11-22T04:38:28,748 DEBUG [M:0;8fc3ff0a63e6:32803 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/675c2d7a80694cc2b8b74b57a7d98cc3 is 52, key is load_balancer_on/state:d/1732250308368/Put/seqid=0
2024-11-22T04:38:28,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741842_1018 (size=5056)
2024-11-22T04:38:28,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741842_1018 (size=5056)
2024-11-22T04:38:28,752 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/675c2d7a80694cc2b8b74b57a7d98cc3
2024-11-22T04:38:28,756 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/ed660ffe7e3342539547adefdfb6fd61 as hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed660ffe7e3342539547adefdfb6fd61
2024-11-22T04:38:28,760 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/ed660ffe7e3342539547adefdfb6fd61, entries=8, sequenceid=29, filesize=5.5 K
2024-11-22T04:38:28,761 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8bd49a8d72064a80a30070b5d99bb961 as hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8bd49a8d72064a80a30070b5d99bb961
2024-11-22T04:38:28,765 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8bd49a8d72064a80a30070b5d99bb961, entries=3, sequenceid=29, filesize=5.2 K
2024-11-22T04:38:28,765 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0e9395de618f4ed3803530be73659616 as hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0e9395de618f4ed3803530be73659616
2024-11-22T04:38:28,768 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0e9395de618f4ed3803530be73659616, entries=1, sequenceid=29, filesize=5.0 K
2024-11-22T04:38:28,769 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/675c2d7a80694cc2b8b74b57a7d98cc3 as hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/675c2d7a80694cc2b8b74b57a7d98cc3
2024-11-22T04:38:28,772 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44051/user/jenkins/test-data/9c1c9d27-36c6-4645-8498-5568b617b979/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/675c2d7a80694cc2b8b74b57a7d98cc3, entries=1, sequenceid=29, filesize=4.9 K
2024-11-22T04:38:28,773 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=29, compaction requested=false
2024-11-22T04:38:28,774 INFO [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682.
2024-11-22T04:38:28,774 DEBUG [M:0;8fc3ff0a63e6:32803 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682:
    Waiting for close lock at 1732250308672
    Disabling compacts and flushes for region at 1732250308672
    Disabling writes for close at 1732250308672
    Obtaining lock to block concurrent updates at 1732250308672
    Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732250308672
    Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732250308673 (+1 ms)
    Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732250308673
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732250308673
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732250308685 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732250308686 (+1 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732250308693 (+7 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732250308706 (+13 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732250308706
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732250308715 (+9 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732250308727 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732250308727
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732250308735 (+8 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732250308747 (+12 ms)
    Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732250308747
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@473ce643: reopening flushed file at 1732250308756 (+9 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7fe5d561: reopening flushed file at 1732250308760 (+4 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d8d4ac4: reopening flushed file at 1732250308765 (+5 ms)
    Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6678be0b: reopening flushed file at 1732250308768 (+3 ms)
    Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 101ms, sequenceid=29, compaction requested=false at 1732250308773 (+5 ms)
    Writing region close event to WAL at 1732250308774 (+1 ms)
    Closed at 1732250308774
2024-11-22T04:38:28,775 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,775 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,775 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,775 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,775 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-22T04:38:28,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35201 is added to blk_1073741830_1006 (size=10311)
2024-11-22T04:38:28,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40183 is added to blk_1073741830_1006 (size=10311)
2024-11-22T04:38:28,777 INFO [M:0;8fc3ff0a63e6:32803 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down.
2024-11-22T04:38:28,777 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting.
2024-11-22T04:38:28,777 INFO [M:0;8fc3ff0a63e6:32803 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32803
2024-11-22T04:38:28,777 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.HBaseServerBase(479): Close zookeeper
2024-11-22T04:38:28,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T04:38:28,890 INFO [M:0;8fc3ff0a63e6:32803 {}] hbase.HBaseServerBase(486): Close table descriptors
2024-11-22T04:38:28,890 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32803-0x10160d605390000, quorum=127.0.0.1:51200, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-11-22T04:38:28,896 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@62b479b9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T04:38:28,897 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6724e6e5{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T04:38:28,897 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T04:38:28,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c78c12c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T04:38:28,897 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@290d7c20{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,STOPPED}
2024-11-22T04:38:28,900 WARN [BP-2097659817-172.17.0.2-1732250304782 heartbeating to localhost/127.0.0.1:44051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T04:38:28,900 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T04:38:28,900 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T04:38:28,900 WARN [BP-2097659817-172.17.0.2-1732250304782 heartbeating to localhost/127.0.0.1:44051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2097659817-172.17.0.2-1732250304782 (Datanode Uuid 1b97dde5-f2b7-4395-810a-c5fcd4a8e1b3) service to localhost/127.0.0.1:44051
2024-11-22T04:38:28,900 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data3/current/BP-2097659817-172.17.0.2-1732250304782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T04:38:28,901 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data4/current/BP-2097659817-172.17.0.2-1732250304782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T04:38:28,901 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T04:38:28,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@616bab4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-22T04:38:28,903 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2269c58e{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T04:38:28,903 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T04:38:28,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59532081{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T04:38:28,903 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20dd8a9c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,STOPPED}
2024-11-22T04:38:28,904 WARN [BP-2097659817-172.17.0.2-1732250304782 heartbeating to localhost/127.0.0.1:44051 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-22T04:38:28,904 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-22T04:38:28,904 WARN [BP-2097659817-172.17.0.2-1732250304782 heartbeating to localhost/127.0.0.1:44051 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2097659817-172.17.0.2-1732250304782 (Datanode Uuid 0721f1bf-f570-4604-b27e-b4cc2f897206) service to localhost/127.0.0.1:44051
2024-11-22T04:38:28,904 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-22T04:38:28,904 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data1/current/BP-2097659817-172.17.0.2-1732250304782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T04:38:28,905 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/cluster_9f9d5431-10b8-eb30-d33a-620665f1d921/data/data2/current/BP-2097659817-172.17.0.2-1732250304782 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-22T04:38:28,905 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-22T04:38:28,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@534394c4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-11-22T04:38:28,909 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7acdff1a{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-22T04:38:28,909 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-22T04:38:28,909 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@663b7fb1{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-22T04:38:28,910 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@a8a2fb4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-3@2/hbase-server/target/test-data/359f0180-2956-5273-95c4-a622a6bd957c/hadoop.log.dir/,STOPPED}
2024-11-22T04:38:28,915 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers
2024-11-22T04:38:28,928 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
2024-11-22T04:38:28,935 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 229)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:44051 from jenkins.hfs.7
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:44051
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: LeaseRenewer:jenkins@localhost:44051
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-44-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44051
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:44051 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44051
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44051
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Potentially hanging thread: IPC Client (292906293) connection to localhost/127.0.0.1:44051 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Potentially hanging thread: nioEventLoopGroup-45-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
 - Thread LEAK? -, OpenFileDescriptor=530 (was 509) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=210 (was 211), ProcessCount=11 (was 11), AvailableMemoryMB=9028 (was 9035)